title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
DOC: Fixes to the whatsnew structure
diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template index 145acbc55b85f..b85150c3444b7 100644 --- a/doc/source/index.rst.template +++ b/doc/source/index.rst.template @@ -164,5 +164,5 @@ See the package overview for more detail about what's in the library. developer internals extending - releases + whatsnew/index {% endif -%} diff --git a/doc/source/releases.rst b/doc/source/whatsnew/index.rst similarity index 62% rename from doc/source/releases.rst rename to doc/source/whatsnew/index.rst index 491569c69115b..d0aab4012ffd5 100644 --- a/doc/source/releases.rst +++ b/doc/source/whatsnew/index.rst @@ -16,7 +16,7 @@ Version 0.24 .. toctree:: :maxdepth: 2 - whatsnew/v0.24.0 + v0.24.0 Version 0.23 ------------ @@ -24,11 +24,11 @@ Version 0.23 .. toctree:: :maxdepth: 2 - whatsnew/v0.23.4 - whatsnew/v0.23.3 - whatsnew/v0.23.2 - whatsnew/v0.23.1 - whatsnew/v0.23.0 + v0.23.4 + v0.23.3 + v0.23.2 + v0.23.1 + v0.23.0 Version 0.22 ------------ @@ -36,7 +36,7 @@ Version 0.22 .. toctree:: :maxdepth: 2 - whatsnew/v0.22.0 + v0.22.0 Version 0.21 ------------ @@ -44,8 +44,8 @@ Version 0.21 .. toctree:: :maxdepth: 2 - whatsnew/v0.21.0 - whatsnew/v0.21.1 + v0.21.1 + v0.21.0 Version 0.20 ------------ @@ -53,9 +53,9 @@ Version 0.20 .. toctree:: :maxdepth: 2 - whatsnew/v0.20.0 - whatsnew/v0.20.2 - whatsnew/v0.20.3 + v0.20.3 + v0.20.2 + v0.20.0 Version 0.19 ------------ @@ -63,9 +63,9 @@ Version 0.19 .. toctree:: :maxdepth: 2 - whatsnew/v0.19.0 - whatsnew/v0.19.1 - whatsnew/v0.19.2 + v0.19.2 + v0.19.1 + v0.19.0 Version 0.18 ------------ @@ -73,8 +73,8 @@ Version 0.18 .. toctree:: :maxdepth: 2 - whatsnew/v0.18.0 - whatsnew/v0.18.1 + v0.18.1 + v0.18.0 Version 0.17 ------------ @@ -82,8 +82,8 @@ Version 0.17 .. toctree:: :maxdepth: 2 - whatsnew/v0.17.0 - whatsnew/v0.17.1 + v0.17.1 + v0.17.0 Version 0.16 ------------ @@ -91,9 +91,9 @@ Version 0.16 .. 
toctree:: :maxdepth: 2 - whatsnew/v0.16.0 - whatsnew/v0.16.1 - whatsnew/v0.16.2 + v0.16.2 + v0.16.1 + v0.16.0 Version 0.15 ------------ @@ -101,9 +101,9 @@ Version 0.15 .. toctree:: :maxdepth: 2 - whatsnew/v0.15.0 - whatsnew/v0.15.1 - whatsnew/v0.15.2 + v0.15.2 + v0.15.1 + v0.15.0 Version 0.14 ------------ @@ -111,8 +111,8 @@ Version 0.14 .. toctree:: :maxdepth: 2 - whatsnew/v0.14.0 - whatsnew/v0.14.1 + v0.14.1 + v0.14.0 Version 0.13 ------------ @@ -120,8 +120,8 @@ Version 0.13 .. toctree:: :maxdepth: 2 - whatsnew/v0.13.0 - whatsnew/v0.13.1 + v0.13.1 + v0.13.0 Version 0.12 ------------ @@ -129,7 +129,7 @@ Version 0.12 .. toctree:: :maxdepth: 2 - whatsnew/v0.12.0 + v0.12.0 Version 0.11 ------------ @@ -137,7 +137,7 @@ Version 0.11 .. toctree:: :maxdepth: 2 - whatsnew/v0.11.0 + v0.11.0 Version 0.10 ------------ @@ -145,8 +145,8 @@ Version 0.10 .. toctree:: :maxdepth: 2 - whatsnew/v0.10.0 - whatsnew/v0.10.1 + v0.10.1 + v0.10.0 Version 0.9 ----------- @@ -154,8 +154,8 @@ Version 0.9 .. toctree:: :maxdepth: 2 - whatsnew/v0.9.0 - whatsnew/v0.9.1 + v0.9.1 + v0.9.0 Version 0.8 ------------ @@ -163,8 +163,8 @@ Version 0.8 .. toctree:: :maxdepth: 2 - whatsnew/v0.8.0 - whatsnew/v0.8.1 + v0.8.1 + v0.8.0 Version 0.7 ----------- @@ -172,10 +172,10 @@ Version 0.7 .. toctree:: :maxdepth: 2 - whatsnew/v0.7.0 - whatsnew/v0.7.1 - whatsnew/v0.7.2 - whatsnew/v0.7.3 + v0.7.3 + v0.7.2 + v0.7.1 + v0.7.0 Version 0.6 ----------- @@ -183,9 +183,8 @@ Version 0.6 .. toctree:: :maxdepth: 2 - - whatsnew/v0.6.0 - whatsnew/v0.6.1 + v0.6.1 + v0.6.0 Version 0.5 ----------- @@ -193,8 +192,7 @@ Version 0.5 .. toctree:: :maxdepth: 2 - - whatsnew/v0.5.0 + v0.5.0 Version 0.4 ----------- @@ -202,4 +200,4 @@ Version 0.4 .. toctree:: :maxdepth: 2 - whatsnew/v0.4.x + v0.4.x diff --git a/doc/source/whatsnew/v0.4.x.rst b/doc/source/whatsnew/v0.4.x.rst index ebf5286a41b6e..0c2047ee69b81 100644 --- a/doc/source/whatsnew/v0.4.x.rst +++ b/doc/source/whatsnew/v0.4.x.rst @@ -1,6 +1,6 @@ .. 
_whatsnew_04x: -v.0.4.3 through v0.4.1 (September 25 - October 9, 2011) +v.0.4.1 through v0.4.3 (September 25 - October 9, 2011) ------------------------------------------------------- {{ header }}
- [X] xref #24499 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Changes in this PR: - Moving `releases.rst` to `whatsnew/index.rst`, which is more consistent with the new structure of the documentation - Sorting the `whatsnew/index` from the newest to the oldest (was from newest to oldest for the major version, but inside them, the minor versions were sorted from oldest to newest, except for 0.23). CC: @TomAugspurger
https://api.github.com/repos/pandas-dev/pandas/pulls/24679
2019-01-09T02:08:38Z
2019-01-09T12:33:45Z
2019-01-09T12:33:45Z
2019-01-09T12:33:46Z
DOC: Creating top-level getting started section, and moving pages inside
diff --git a/doc/source/10min.rst b/doc/source/getting_started/10min.rst similarity index 100% rename from doc/source/10min.rst rename to doc/source/getting_started/10min.rst diff --git a/doc/source/basics.rst b/doc/source/getting_started/basics.rst similarity index 100% rename from doc/source/basics.rst rename to doc/source/getting_started/basics.rst diff --git a/doc/source/dsintro.rst b/doc/source/getting_started/dsintro.rst similarity index 100% rename from doc/source/dsintro.rst rename to doc/source/getting_started/dsintro.rst diff --git a/doc/source/getting_started/index.rst b/doc/source/getting_started/index.rst new file mode 100644 index 0000000000000..116efe79beef1 --- /dev/null +++ b/doc/source/getting_started/index.rst @@ -0,0 +1,16 @@ +{{ header }} + +.. _getting_started: + +=============== +Getting started +=============== + +.. toctree:: + :maxdepth: 2 + + overview + 10min + basics + dsintro + tutorials diff --git a/doc/source/overview.rst b/doc/source/getting_started/overview.rst similarity index 99% rename from doc/source/overview.rst rename to doc/source/getting_started/overview.rst index b98e2d4b9963c..1e07df47aadca 100644 --- a/doc/source/overview.rst +++ b/doc/source/getting_started/overview.rst @@ -119,5 +119,5 @@ The information about current institutional partners can be found on `pandas web License ------- -.. literalinclude:: ../../LICENSE +.. literalinclude:: ../../../LICENSE diff --git a/doc/source/tutorials.rst b/doc/source/getting_started/tutorials.rst similarity index 100% rename from doc/source/tutorials.rst rename to doc/source/getting_started/tutorials.rst diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template index 53f116a1a0e0a..bc420a906b59c 100644 --- a/doc/source/index.rst.template +++ b/doc/source/index.rst.template @@ -124,12 +124,8 @@ See the package overview for more detail about what's in the library. 
{% if not single_doc -%} What's New <whatsnew/v0.24.0> install - overview - 10min - tutorials + getting_started/index cookbook - dsintro - basics user_guide/index r_interface ecosystem diff --git a/setup.cfg b/setup.cfg index 6143cb8446216..7155cc1013544 100644 --- a/setup.cfg +++ b/setup.cfg @@ -46,7 +46,7 @@ ignore = E402, # module level import not at top of file E711, # comparison to none should be 'if cond is none:' exclude = - doc/source/basics.rst + doc/source/getting_started/basics.rst doc/source/development/contributing_docstring.rst
- [X] xref #24499 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24678
2019-01-09T01:50:44Z
2019-01-23T17:52:12Z
2019-01-23T17:52:12Z
2019-01-23T17:52:12Z
DOC: Creating top-level user guide section, and moving pages inside
diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template index b85150c3444b7..571c906acbd43 100644 --- a/doc/source/index.rst.template +++ b/doc/source/index.rst.template @@ -131,25 +131,7 @@ See the package overview for more detail about what's in the library. cookbook dsintro basics - text - options - indexing - advanced - computation - missing_data - groupby - merging - reshaping - timeseries - timedeltas - categorical - integer_na - visualization - style - io - enhancingperf - sparse - gotchas + user_guide/index r_interface ecosystem comparison_with_r diff --git a/doc/source/advanced.rst b/doc/source/user_guide/advanced.rst similarity index 100% rename from doc/source/advanced.rst rename to doc/source/user_guide/advanced.rst diff --git a/doc/source/categorical.rst b/doc/source/user_guide/categorical.rst similarity index 100% rename from doc/source/categorical.rst rename to doc/source/user_guide/categorical.rst diff --git a/doc/source/computation.rst b/doc/source/user_guide/computation.rst similarity index 100% rename from doc/source/computation.rst rename to doc/source/user_guide/computation.rst diff --git a/doc/source/enhancingperf.rst b/doc/source/user_guide/enhancingperf.rst similarity index 99% rename from doc/source/enhancingperf.rst rename to doc/source/user_guide/enhancingperf.rst index 0e3d389aa4f6e..9941ffcc9de4d 100644 --- a/doc/source/enhancingperf.rst +++ b/doc/source/user_guide/enhancingperf.rst @@ -783,7 +783,7 @@ significant performance benefit. Here is a plot showing the running time of computation. The two lines are two different engines. -.. image:: _static/eval-perf.png +.. image:: ../_static/eval-perf.png .. note:: @@ -791,7 +791,7 @@ computation. The two lines are two different engines. Operations with smallish objects (around 15k-20k rows) are faster using plain Python: - .. image:: _static/eval-perf-small.png + .. 
image:: ../_static/eval-perf-small.png This plot was created using a ``DataFrame`` with 3 columns each containing diff --git a/doc/source/gotchas.rst b/doc/source/user_guide/gotchas.rst similarity index 100% rename from doc/source/gotchas.rst rename to doc/source/user_guide/gotchas.rst diff --git a/doc/source/groupby.rst b/doc/source/user_guide/groupby.rst similarity index 100% rename from doc/source/groupby.rst rename to doc/source/user_guide/groupby.rst diff --git a/doc/source/user_guide/index.rst b/doc/source/user_guide/index.rst new file mode 100644 index 0000000000000..60e722808d647 --- /dev/null +++ b/doc/source/user_guide/index.rst @@ -0,0 +1,39 @@ +{{ header }} + +.. _user_guide: + +========== +User Guide +========== + +The User Guide covers all of pandas by topic area. Each of the subsections +introduces a topic (such as "working with missing data"), and discusses how +pandas approaches the problem, with many examples throughout. + +Users brand-new to pandas should start with :ref:`10min`. + +Further information on any specific method can be obtained in the +:ref:`api`. + +.. toctree:: + :maxdepth: 2 + + io + indexing + advanced + merging + reshaping + text + missing_data + categorical + integer_na + visualization + computation + groupby + timeseries + timedeltas + style + options + enhancingperf + sparse + gotchas diff --git a/doc/source/indexing.rst b/doc/source/user_guide/indexing.rst similarity index 99% rename from doc/source/indexing.rst rename to doc/source/user_guide/indexing.rst index 3fe416c48f670..be1745e2664a1 100644 --- a/doc/source/indexing.rst +++ b/doc/source/user_guide/indexing.rst @@ -1392,7 +1392,7 @@ Performance of :meth:`~pandas.DataFrame.query` ``DataFrame.query()`` using ``numexpr`` is slightly faster than Python for large frames. -.. image:: _static/query-perf.png +.. image:: ../_static/query-perf.png .. note:: @@ -1400,7 +1400,7 @@ large frames. with ``DataFrame.query()`` if your frame has more than approximately 200,000 rows. - .. 
image:: _static/query-perf-small.png + .. image:: ../_static/query-perf-small.png This plot was created using a ``DataFrame`` with 3 columns each containing floating point values generated using ``numpy.random.randn()``. diff --git a/doc/source/integer_na.rst b/doc/source/user_guide/integer_na.rst similarity index 100% rename from doc/source/integer_na.rst rename to doc/source/user_guide/integer_na.rst diff --git a/doc/source/io.rst b/doc/source/user_guide/io.rst similarity index 99% rename from doc/source/io.rst rename to doc/source/user_guide/io.rst index dd1cde0bdff73..0132392aacaff 100644 --- a/doc/source/io.rst +++ b/doc/source/user_guide/io.rst @@ -2549,7 +2549,7 @@ in the method ``to_string`` described above. HTML: .. raw:: html - :file: _static/basic.html + :file: ../_static/basic.html The ``columns`` argument will limit the columns shown: @@ -2565,7 +2565,7 @@ The ``columns`` argument will limit the columns shown: HTML: .. raw:: html - :file: _static/columns.html + :file: ../_static/columns.html ``float_format`` takes a Python callable to control the precision of floating point values: @@ -2582,7 +2582,7 @@ point values: HTML: .. raw:: html - :file: _static/float_format.html + :file: ../_static/float_format.html ``bold_rows`` will make the row labels bold by default, but you can turn that off: @@ -2597,7 +2597,7 @@ off: write_html(df, 'nobold', bold_rows=False) .. raw:: html - :file: _static/nobold.html + :file: ../_static/nobold.html The ``classes`` argument provides the ability to give the resulting HTML table CSS classes. Note that these classes are *appended* to the existing @@ -2627,7 +2627,7 @@ that contain URLs. HTML: .. raw:: html - :file: _static/render_links.html + :file: ../_static/render_links.html Finally, the ``escape`` argument allows you to control whether the "<", ">" and "&" characters escaped in the resulting HTML (by default it is @@ -2651,7 +2651,7 @@ Escaped: print(df.to_html()) .. 
raw:: html - :file: _static/escape.html + :file: ../_static/escape.html Not escaped: @@ -2660,7 +2660,7 @@ Not escaped: print(df.to_html(escape=False)) .. raw:: html - :file: _static/noescape.html + :file: ../_static/noescape.html .. note:: diff --git a/doc/source/merging.rst b/doc/source/user_guide/merging.rst similarity index 100% rename from doc/source/merging.rst rename to doc/source/user_guide/merging.rst diff --git a/doc/source/missing_data.rst b/doc/source/user_guide/missing_data.rst similarity index 100% rename from doc/source/missing_data.rst rename to doc/source/user_guide/missing_data.rst diff --git a/doc/source/options.rst b/doc/source/user_guide/options.rst similarity index 99% rename from doc/source/options.rst rename to doc/source/user_guide/options.rst index e91be3e6ae730..d640d8b1153c5 100644 --- a/doc/source/options.rst +++ b/doc/source/user_guide/options.rst @@ -487,7 +487,7 @@ If a DataFrame or Series contains these characters, the default output mode may df = pd.DataFrame({u'国籍': ['UK', u'日本'], u'名前': ['Alice', u'しのぶ']}) df -.. image:: _static/option_unicode01.png +.. image:: ../_static/option_unicode01.png Enabling ``display.unicode.east_asian_width`` allows pandas to check each character's "East Asian Width" property. These characters can be aligned properly by setting this option to ``True``. However, this will result in longer render @@ -498,7 +498,7 @@ times than the standard ``len`` function. pd.set_option('display.unicode.east_asian_width', True) df -.. image:: _static/option_unicode02.png +.. image:: ../_static/option_unicode02.png In addition, Unicode characters whose width is "Ambiguous" can either be 1 or 2 characters wide depending on the terminal setting or encoding. The option ``display.unicode.ambiguous_as_wide`` can be used to handle the ambiguity. @@ -510,7 +510,7 @@ By default, an "Ambiguous" character's width, such as "¡" (inverted exclamation df = pd.DataFrame({'a': ['xxx', u'¡¡'], 'b': ['yyy', u'¡¡']}) df -.. 
image:: _static/option_unicode03.png +.. image:: ../_static/option_unicode03.png Enabling ``display.unicode.ambiguous_as_wide`` makes pandas interpret these characters' widths to be 2. (Note that this option will only be effective when ``display.unicode.east_asian_width`` is enabled.) @@ -522,7 +522,7 @@ However, setting this option incorrectly for your terminal will cause these char pd.set_option('display.unicode.ambiguous_as_wide', True) df -.. image:: _static/option_unicode04.png +.. image:: ../_static/option_unicode04.png .. ipython:: python :suppress: diff --git a/doc/source/reshaping.rst b/doc/source/user_guide/reshaping.rst similarity index 98% rename from doc/source/reshaping.rst rename to doc/source/user_guide/reshaping.rst index 9891e22e9d552..5c11be34e6ed4 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/user_guide/reshaping.rst @@ -9,7 +9,7 @@ Reshaping and Pivot Tables Reshaping by pivoting DataFrame objects --------------------------------------- -.. image:: _static/reshaping_pivot.png +.. image:: ../_static/reshaping_pivot.png .. ipython:: python :suppress: @@ -101,7 +101,7 @@ are homogeneously-typed. Reshaping by stacking and unstacking ------------------------------------ -.. image:: _static/reshaping_stack.png +.. image:: ../_static/reshaping_stack.png Closely related to the :meth:`~DataFrame.pivot` method are the related :meth:`~DataFrame.stack` and :meth:`~DataFrame.unstack` methods available on @@ -116,7 +116,7 @@ Closely related to the :meth:`~DataFrame.pivot` method are the related (possibly hierarchical) row index to the column axis, producing a reshaped ``DataFrame`` with a new inner-most level of column labels. -.. image:: _static/reshaping_unstack.png +.. image:: ../_static/reshaping_unstack.png The clearest way to explain is by example. Let's take a prior example data set from the hierarchical indexing section: @@ -158,7 +158,7 @@ unstacks the **last level**: .. _reshaping.unstack_by_name: -.. 
image:: _static/reshaping_unstack_1.png +.. image:: ../_static/reshaping_unstack_1.png If the indexes have names, you can use the level names instead of specifying the level numbers: @@ -168,7 +168,7 @@ the level numbers: stacked.unstack('second') -.. image:: _static/reshaping_unstack_0.png +.. image:: ../_static/reshaping_unstack_0.png Notice that the ``stack`` and ``unstack`` methods implicitly sort the index levels involved. Hence a call to ``stack`` and then ``unstack``, or vice versa, @@ -279,7 +279,7 @@ the right thing: Reshaping by Melt ----------------- -.. image:: _static/reshaping_melt.png +.. image:: ../_static/reshaping_melt.png The top-level :func:`~pandas.melt` function and the corresponding :meth:`DataFrame.melt` are useful to massage a ``DataFrame`` into a format where one or more columns diff --git a/doc/source/sparse.rst b/doc/source/user_guide/sparse.rst similarity index 100% rename from doc/source/sparse.rst rename to doc/source/user_guide/sparse.rst diff --git a/doc/source/style.ipynb b/doc/source/user_guide/style.ipynb similarity index 99% rename from doc/source/style.ipynb rename to doc/source/user_guide/style.ipynb index 792fe5120f6e8..a238c3b16e9ad 100644 --- a/doc/source/style.ipynb +++ b/doc/source/user_guide/style.ipynb @@ -992,7 +992,7 @@ "source": [ "A screenshot of the output:\n", "\n", - "![Excel spreadsheet with styled DataFrame](_static/style-excel.png)\n" + "![Excel spreadsheet with styled DataFrame](../_static/style-excel.png)\n" ] }, { diff --git a/doc/source/text.rst b/doc/source/user_guide/text.rst similarity index 100% rename from doc/source/text.rst rename to doc/source/user_guide/text.rst diff --git a/doc/source/timedeltas.rst b/doc/source/user_guide/timedeltas.rst similarity index 100% rename from doc/source/timedeltas.rst rename to doc/source/user_guide/timedeltas.rst diff --git a/doc/source/timeseries.rst b/doc/source/user_guide/timeseries.rst similarity index 100% rename from doc/source/timeseries.rst rename to 
doc/source/user_guide/timeseries.rst diff --git a/doc/source/visualization.rst b/doc/source/user_guide/visualization.rst similarity index 100% rename from doc/source/visualization.rst rename to doc/source/user_guide/visualization.rst
- [X] xref #24499 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry This is part of #24499, will be moving the pages in different PRs, so it's easier to discuss.
https://api.github.com/repos/pandas-dev/pandas/pulls/24677
2019-01-09T01:25:29Z
2019-01-23T16:22:24Z
2019-01-23T16:22:24Z
2019-01-23T16:22:25Z
STY: use pytest.raises context syntax
diff --git a/pandas/tests/io/msgpack/test_sequnpack.py b/pandas/tests/io/msgpack/test_sequnpack.py index 48f9817142762..91f5778a7ce6c 100644 --- a/pandas/tests/io/msgpack/test_sequnpack.py +++ b/pandas/tests/io/msgpack/test_sequnpack.py @@ -30,7 +30,9 @@ def test_foobar(self): assert unpacker.unpack() == ord(b'b') assert unpacker.unpack() == ord(b'a') assert unpacker.unpack() == ord(b'r') - pytest.raises(OutOfData, unpacker.unpack) + msg = "No more data to unpack" + with pytest.raises(OutOfData, match=msg): + unpacker.unpack() unpacker.feed(b'foo') unpacker.feed(b'bar') @@ -50,14 +52,24 @@ def test_foobar_skip(self): unpacker.skip() assert unpacker.unpack() == ord(b'a') unpacker.skip() - pytest.raises(OutOfData, unpacker.unpack) + msg = "No more data to unpack" + with pytest.raises(OutOfData, match=msg): + unpacker.unpack() + + def test_maxbuffersize_read_size_exceeds_max_buffer_size(self): + msg = "read_size should be less or equal to max_buffer_size" + with pytest.raises(ValueError, match=msg): + Unpacker(read_size=5, max_buffer_size=3) + + def test_maxbuffersize_bufferfull(self): + unpacker = Unpacker(read_size=3, max_buffer_size=3, use_list=1) + unpacker.feed(b'foo') + with pytest.raises(BufferFull, match=r'^$'): + unpacker.feed(b'b') def test_maxbuffersize(self): - pytest.raises(ValueError, Unpacker, read_size=5, max_buffer_size=3) unpacker = Unpacker(read_size=3, max_buffer_size=3, use_list=1) - unpacker.feed(b'fo') - pytest.raises(BufferFull, unpacker.feed, b'ob') - unpacker.feed(b'o') + unpacker.feed(b'foo') assert ord('f') == next(unpacker) unpacker.feed(b'b') assert ord('o') == next(unpacker) diff --git a/pandas/tests/io/msgpack/test_unpack.py b/pandas/tests/io/msgpack/test_unpack.py index e63631a97bbb4..356156296c067 100644 --- a/pandas/tests/io/msgpack/test_unpack.py +++ b/pandas/tests/io/msgpack/test_unpack.py @@ -16,7 +16,9 @@ def test_unpack_array_header_from_file(self): assert unpacker.unpack() == 2 assert unpacker.unpack() == 3 assert 
unpacker.unpack() == 4 - pytest.raises(OutOfData, unpacker.unpack) + msg = "No more data to unpack" + with pytest.raises(OutOfData, match=msg): + unpacker.unpack() def test_unpacker_hook_refcnt(self): if not hasattr(sys, 'getrefcount'): diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py index 0fd92cb496df3..8119de67890a5 100644 --- a/pandas/tests/io/parser/test_textreader.py +++ b/pandas/tests/io/parser/test_textreader.py @@ -145,7 +145,10 @@ def test_skip_bad_lines(self, capsys): reader = TextReader(StringIO(data), delimiter=':', header=None) - pytest.raises(parser.ParserError, reader.read) + msg = (r"Error tokenizing data\. C error: Expected 3 fields in" + " line 4, saw 4") + with pytest.raises(parser.ParserError, match=msg): + reader.read() reader = TextReader(StringIO(data), delimiter=':', header=None, diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index 13a8b1a0edfd3..3354bca63be92 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -3,7 +3,6 @@ """ import mmap import os -import re import pytest @@ -146,7 +145,16 @@ def test_read_non_existant(self, reader, module, error_class, fn_ext): pytest.importorskip(module) path = os.path.join(HERE, 'data', 'does_not_exist.' + fn_ext) - with pytest.raises(error_class): + msg1 = (r"File (b')?.+does_not_exist\.{}'? 
does not exist" + .format(fn_ext)) + msg2 = (r"\[Errno 2\] No such file or directory: '.+does_not_exist" + r"\.{}'").format(fn_ext) + msg3 = "Expected object or value" + msg4 = "path_or_buf needs to be a string file path or file-like" + msg5 = (r"\[Errno 2\] File .+does_not_exist\.{} does not exist:" + r" '.+does_not_exist\.{}'").format(fn_ext, fn_ext) + with pytest.raises(error_class, match=r"({}|{}|{}|{}|{})".format( + msg1, msg2, msg3, msg4, msg5)): reader(path) @pytest.mark.parametrize('reader, module, error_class, fn_ext', [ @@ -169,14 +177,26 @@ def test_read_expands_user_home_dir(self, reader, module, monkeypatch.setattr(icom, '_expand_user', lambda x: os.path.join('foo', x)) - message = "".join(["foo", os.path.sep, "does_not_exist.", fn_ext]) - - with pytest.raises(error_class, message=re.escape(message)): + msg1 = (r"File (b')?.+does_not_exist\.{}'? does not exist" + .format(fn_ext)) + msg2 = (r"\[Errno 2\] No such file or directory:" + r" '.+does_not_exist\.{}'").format(fn_ext) + msg3 = "Unexpected character found when decoding 'false'" + msg4 = "path_or_buf needs to be a string file path or file-like" + msg5 = (r"\[Errno 2\] File .+does_not_exist\.{} does not exist:" + r" '.+does_not_exist\.{}'").format(fn_ext, fn_ext) + + with pytest.raises(error_class, match=r"({}|{}|{}|{}|{})".format( + msg1, msg2, msg3, msg4, msg5)): reader(path) def test_read_non_existant_read_table(self): path = os.path.join(HERE, 'data', 'does_not_exist.' 
+ 'csv') - with pytest.raises(FileNotFoundError): + msg1 = r"File b'.+does_not_exist\.csv' does not exist" + msg2 = (r"\[Errno 2\] File .+does_not_exist\.csv does not exist:" + r" '.+does_not_exist\.csv'") + with pytest.raises(FileNotFoundError, match=r"({}|{})".format( + msg1, msg2)): with tm.assert_produces_warning(FutureWarning): pd.read_table(path) @@ -326,7 +346,8 @@ def test_next(self, mmap_file): next_line = next(wrapper) assert next_line.strip() == line.strip() - pytest.raises(StopIteration, next, wrapper) + with pytest.raises(StopIteration, match=r'^$'): + next(wrapper) def test_unknown_engine(self): with tm.ensure_clean() as path: diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py index da0a9ed4ba7ed..9eb6d327be025 100644 --- a/pandas/tests/io/test_packers.py +++ b/pandas/tests/io/test_packers.py @@ -156,9 +156,14 @@ class A(object): def __init__(self): self.read = 0 - pytest.raises(ValueError, read_msgpack, path_or_buf=None) - pytest.raises(ValueError, read_msgpack, path_or_buf={}) - pytest.raises(ValueError, read_msgpack, path_or_buf=A()) + msg = (r"Invalid file path or buffer object type: <(class|type)" + r" '{}'>") + with pytest.raises(ValueError, match=msg.format('NoneType')): + read_msgpack(path_or_buf=None) + with pytest.raises(ValueError, match=msg.format('dict')): + read_msgpack(path_or_buf={}) + with pytest.raises(ValueError, match=msg.format(r'.*\.A')): + read_msgpack(path_or_buf=A()) class TestNumpy(TestPackers): @@ -567,7 +572,9 @@ def _check_roundtrip(self, obj, comparator, **kwargs): # currently these are not implemetned # i_rec = self.encode_decode(obj) # comparator(obj, i_rec, **kwargs) - pytest.raises(NotImplementedError, self.encode_decode, obj) + msg = r"msgpack sparse (series|frame) is not implemented" + with pytest.raises(NotImplementedError, match=msg): + self.encode_decode(obj) def test_sparse_series(self): diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 
ce9be6a7857bf..586297d2e3872 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -506,7 +506,8 @@ def test_invalid_timestamp(self, version): original = DataFrame([(1,)], columns=['variable']) time_stamp = '01 Jan 2000, 00:00:00' with tm.ensure_clean() as path: - with pytest.raises(ValueError): + msg = "time_stamp should be datetime type" + with pytest.raises(ValueError, match=msg): original.to_stata(path, time_stamp=time_stamp, version=version) @@ -547,8 +548,8 @@ def test_no_index(self): with tm.ensure_clean() as path: original.to_stata(path, write_index=False) written_and_read_again = self.read_dta(path) - pytest.raises( - KeyError, lambda: written_and_read_again['index_not_written']) + with pytest.raises(KeyError, match=original.index.name): + written_and_read_again['index_not_written'] def test_string_no_dates(self): s1 = Series(['a', 'A longer string']) @@ -713,7 +714,11 @@ def test_excessively_long_string(self): s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len, 'c' * str_len]) original = DataFrame(s) - with pytest.raises(ValueError): + msg = (r"Fixed width strings in Stata \.dta files are limited to 244" + r" \(or fewer\)\ncharacters\. Column 's500' does not satisfy" + r" this restriction\. 
Use the\n'version=117' parameter to write" + r" the newer \(Stata 13 and later\) format\.") + with pytest.raises(ValueError, match=msg): with tm.ensure_clean() as path: original.to_stata(path) @@ -864,11 +869,14 @@ def test_drop_column(self): columns=columns) tm.assert_frame_equal(expected, reordered) - with pytest.raises(ValueError): + msg = "columns contains duplicate entries" + with pytest.raises(ValueError, match=msg): columns = ['byte_', 'byte_'] read_stata(self.dta15_117, convert_dates=True, columns=columns) - with pytest.raises(ValueError): + msg = ("The following columns were not found in the Stata data set:" + " not_found") + with pytest.raises(ValueError, match=msg): columns = ['byte_', 'int_', 'long_', 'not_found'] read_stata(self.dta15_117, convert_dates=True, columns=columns) @@ -924,7 +932,10 @@ def test_categorical_warnings_and_errors(self): original = pd.concat([original[col].astype('category') for col in original], axis=1) with tm.ensure_clean() as path: - pytest.raises(ValueError, original.to_stata, path) + msg = ("Stata value labels for a single variable must have" + r" a combined length less than 32,000 characters\.") + with pytest.raises(ValueError, match=msg): + original.to_stata(path) original = pd.DataFrame.from_records( [['a'], @@ -1196,14 +1207,17 @@ def test_invalid_variable_labels(self, version): 'b': 'City Exponent', 'c': 'City'} with tm.ensure_clean() as path: - with pytest.raises(ValueError): + msg = "Variable labels must be 80 characters or fewer" + with pytest.raises(ValueError, match=msg): original.to_stata(path, variable_labels=variable_labels, version=version) variable_labels['a'] = u'invalid character Œ' with tm.ensure_clean() as path: - with pytest.raises(ValueError): + msg = ("Variable labels must contain only characters that can be" + " encoded in Latin-1") + with pytest.raises(ValueError, match=msg): original.to_stata(path, variable_labels=variable_labels, version=version) @@ -1221,7 +1235,9 @@ def 
test_write_variable_label_errors(self): 'b': 'City Exponent', 'c': u''.join(values)} - with pytest.raises(ValueError): + msg = ("Variable labels must contain only characters that can be" + " encoded in Latin-1") + with pytest.raises(ValueError, match=msg): with tm.ensure_clean() as path: original.to_stata(path, variable_labels=variable_labels_utf8) @@ -1231,7 +1247,8 @@ def test_write_variable_label_errors(self): 'that is too long for Stata which means ' 'that it has more than 80 characters'} - with pytest.raises(ValueError): + msg = "Variable labels must be 80 characters or fewer" + with pytest.raises(ValueError, match=msg): with tm.ensure_clean() as path: original.to_stata(path, variable_labels=variable_labels_long) @@ -1265,7 +1282,8 @@ def test_default_date_conversion(self): def test_unsupported_type(self): original = pd.DataFrame({'a': [1 + 2j, 2 + 4j]}) - with pytest.raises(NotImplementedError): + msg = "Data type complex128 not supported" + with pytest.raises(NotImplementedError, match=msg): with tm.ensure_clean() as path: original.to_stata(path) @@ -1277,7 +1295,8 @@ def test_unsupported_datetype(self): 'strs': ['apple', 'banana', 'cherry'], 'dates': dates}) - with pytest.raises(NotImplementedError): + msg = "Format %tC not implemented" + with pytest.raises(NotImplementedError, match=msg): with tm.ensure_clean() as path: original.to_stata(path, convert_dates={'dates': 'tC'}) @@ -1291,9 +1310,10 @@ def test_unsupported_datetype(self): def test_repeated_column_labels(self): # GH 13923 - with pytest.raises(ValueError) as cm: + msg = (r"Value labels for column ethnicsn are not unique\. 
The" + r" repeated labels are:\n\n-+wolof") + with pytest.raises(ValueError, match=msg): read_stata(self.dta23, convert_categoricals=True) - assert 'wolof' in cm.exception def test_stata_111(self): # 111 is an old version but still used by current versions of @@ -1316,17 +1336,18 @@ def test_out_of_range_double(self): 'ColumnTooBig': [0.0, np.finfo(np.double).eps, np.finfo(np.double).max]}) - with pytest.raises(ValueError) as cm: + msg = (r"Column ColumnTooBig has a maximum value \(.+\)" + r" outside the range supported by Stata \(.+\)") + with pytest.raises(ValueError, match=msg): with tm.ensure_clean() as path: df.to_stata(path) - assert 'ColumnTooBig' in cm.exception df.loc[2, 'ColumnTooBig'] = np.inf - with pytest.raises(ValueError) as cm: + msg = ("Column ColumnTooBig has a maximum value of infinity which" + " is outside the range supported by Stata") + with pytest.raises(ValueError, match=msg): with tm.ensure_clean() as path: df.to_stata(path) - assert 'ColumnTooBig' in cm.exception - assert 'infinity' in cm.exception def test_out_of_range_float(self): original = DataFrame({'ColumnOk': [0.0, @@ -1348,11 +1369,11 @@ def test_out_of_range_float(self): reread.set_index('index')) original.loc[2, 'ColumnTooBig'] = np.inf - with pytest.raises(ValueError) as cm: + msg = ("Column ColumnTooBig has a maximum value of infinity which" + " is outside the range supported by Stata") + with pytest.raises(ValueError, match=msg): with tm.ensure_clean() as path: original.to_stata(path) - assert 'ColumnTooBig' in cm.exception - assert 'infinity' in cm.exception def test_path_pathlib(self): df = tm.makeDataFrame() @@ -1466,7 +1487,8 @@ def test_invalid_date_conversion(self): 'dates': dates}) with tm.ensure_clean() as path: - with pytest.raises(ValueError): + msg = "convert_dates key must be a column or an integer" + with pytest.raises(ValueError, match=msg): original.to_stata(path, convert_dates={'wrong_name': 'tc'}) @@ -1546,10 +1568,14 @@ def test_all_none_exception(self, 
version): output = pd.DataFrame(output) output.loc[:, 'none'] = None with tm.ensure_clean() as path: - with pytest.raises(ValueError) as excinfo: + msg = (r"Column `none` cannot be exported\.\n\n" + "Only string-like object arrays containing all strings or a" + r" mix of strings and None can be exported\. Object arrays" + r" containing only null values are prohibited\. Other" + " object typescannot be exported and must first be" + r" converted to one of the supported types\.") + with pytest.raises(ValueError, match=msg): output.to_stata(path, version=version) - assert 'Only string-like' in excinfo.value.args[0] - assert 'Column `none`' in excinfo.value.args[0] @pytest.mark.parametrize('version', [114, 117]) def test_invalid_file_not_written(self, version): @@ -1557,7 +1583,12 @@ def test_invalid_file_not_written(self, version): df = DataFrame([content], columns=['invalid']) expected_exc = UnicodeEncodeError if PY3 else UnicodeDecodeError with tm.ensure_clean() as path: - with pytest.raises(expected_exc): + msg1 = (r"'latin-1' codec can't encode character '\\ufffd'" + r" in position 14: ordinal not in range\(256\)") + msg2 = ("'ascii' codec can't decode byte 0xef in position 14:" + r" ordinal not in range\(128\)") + with pytest.raises(expected_exc, match=r'{}|{}'.format( + msg1, msg2)): with tm.assert_produces_warning(ResourceWarning): df.to_stata(path)
xref #24332
https://api.github.com/repos/pandas-dev/pandas/pulls/24676
2019-01-08T22:53:52Z
2019-01-09T12:17:50Z
2019-01-09T12:17:50Z
2019-01-09T13:23:44Z
[WIP]: API ExtensionDtype for DTA & TDA
diff --git a/doc/source/api/arrays.rst b/doc/source/api/arrays.rst index 5ecc5181af22c..8a66e1d87c1c6 100644 --- a/doc/source/api/arrays.rst +++ b/doc/source/api/arrays.rst @@ -136,7 +136,7 @@ Methods A collection of timestamps may be stored in a :class:`arrays.DatetimeArray`. For timezone-aware data, the ``.dtype`` of a ``DatetimeArray`` is a -:class:`DatetimeTZDtype`. For timezone-naive data, ``np.dtype("datetime64[ns]")`` +:class:`DatetimeTZDtype`. For timezone-naive data, :class:`DatetimeDtype` is used. If the data are tz-aware, then every value in the array must have the same timezone. @@ -145,6 +145,7 @@ If the data are tz-aware, then every value in the array must have the same timez :toctree: generated/ arrays.DatetimeArray + DatetimeDtype DatetimeTZDtype .. _api.arrays.timedelta: diff --git a/pandas/core/api.py b/pandas/core/api.py index afc929c39086c..3bb659a83ccae 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -21,7 +21,9 @@ CategoricalDtype, PeriodDtype, IntervalDtype, + DatetimeDtype, DatetimeTZDtype, + TimedeltaDtype, ) from pandas.core.arrays import Categorical, array from pandas.core.groupby import Grouper diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index a2d67efbecbba..f622ae96c6ef5 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -18,7 +18,7 @@ is_datetime64_ns_dtype, is_datetime64tz_dtype, is_dtype_equal, is_extension_type, is_float_dtype, is_object_dtype, is_period_dtype, is_string_dtype, is_timedelta64_dtype, pandas_dtype) -from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.dtypes import DatetimeDtype, DatetimeTZDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndexClass, ABCPandasArray, ABCSeries) from pandas.core.dtypes.missing import isna @@ -334,6 +334,8 @@ def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False): # a tz-aware Timestamp (with a tz specific to its datetime) will # be incorrect(ish?) 
for the array as a whole dtype = DatetimeTZDtype(tz=timezones.tz_standardize(dtype.tz)) + else: + dtype = DatetimeDtype() self._data = values self._dtype = dtype @@ -1987,7 +1989,8 @@ def _validate_dt64_dtype(dtype): if dtype is not None: dtype = pandas_dtype(dtype) if ((isinstance(dtype, np.dtype) and dtype != _NS_DTYPE) - or not isinstance(dtype, (np.dtype, DatetimeTZDtype))): + or not isinstance(dtype, (np.dtype, DatetimeTZDtype, + DatetimeDtype))): raise ValueError("Unexpected value for 'dtype': '{dtype}'. " "Must be 'datetime64[ns]' or DatetimeTZDtype'." .format(dtype=dtype)) diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index be1a7097b0e0d..e1cc86ba9245b 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -19,7 +19,7 @@ is_integer_dtype, is_list_like, is_object_dtype, is_scalar, is_string_dtype, is_timedelta64_dtype, is_timedelta64_ns_dtype, pandas_dtype) -from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.dtypes import DatetimeTZDtype, TimedeltaDtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndexClass, ABCSeries, ABCTimedeltaIndex) from pandas.core.dtypes.missing import isna @@ -127,7 +127,7 @@ def _box_func(self): @property def dtype(self): - return _TD_DTYPE + return self._dtype # ---------------------------------------------------------------- # Constructors @@ -160,16 +160,8 @@ def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False): # nanosecond UTC (or tz-naive) unix timestamps values = values.view(_TD_DTYPE) - if values.dtype != _TD_DTYPE: - raise TypeError(_BAD_DTYPE.format(dtype=values.dtype)) - - try: - dtype_mismatch = dtype != _TD_DTYPE - except TypeError: - raise TypeError(_BAD_DTYPE.format(dtype=dtype)) - else: - if dtype_mismatch: - raise TypeError(_BAD_DTYPE.format(dtype=dtype)) + _validate_td64_dtype(values.dtype) + dtype = _validate_td64_dtype(dtype) if freq == "infer": msg = ( @@ -192,21 +184,19 @@ def 
__init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False): @classmethod def _simple_new(cls, values, freq=None, dtype=_TD_DTYPE): - assert dtype == _TD_DTYPE, dtype + dtype = _validate_td64_dtype(dtype) assert isinstance(values, np.ndarray), type(values) result = object.__new__(cls) result._data = values.view(_TD_DTYPE) result._freq = to_offset(freq) - result._dtype = _TD_DTYPE + result._dtype = dtype return result @classmethod def _from_sequence(cls, data, dtype=_TD_DTYPE, copy=False, freq=None, unit=None): - if dtype != _TD_DTYPE: - raise ValueError("Only timedelta64[ns] dtype is valid.") - + _validate_td64_dtype(dtype) freq, freq_infer = dtl.maybe_infer_freq(freq) data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=unit) @@ -1015,3 +1005,40 @@ def _generate_regular_range(start, end, periods, offset): data = np.arange(b, e, stride, dtype=np.int64) return data + + +def _validate_td64_dtype(dtype): + """ + Validate a dtype for TimedeltaArray. + + Parameters + ---------- + dtype : Union[str, numpy.dtype, Timedelta] + Only np.dtype("m8[ns]") is allowed numpy dtypes. 
+ + Returns + ------- + TimedeltaDtype + """ + if isinstance(dtype, compat.string_types): + try: + dtype = np.dtype(dtype) + except TypeError: + # not a Numpy dtype + pass + + if isinstance(dtype, np.dtype): + if dtype != _TD_DTYPE: + raise TypeError(_BAD_DTYPE.format(dtype=dtype)) + + dtype = TimedeltaDtype() + + elif isinstance(dtype, compat.string_types): + if dtype != "ns": + raise TypeError(_BAD_DTYPE.format(dtype=dtype)) + dtype = TimedeltaDtype(dtype) + + if dtype != TimedeltaDtype(): + raise ValueError("Only timedelta64[ns] dtype is valid") + + return dtype diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index e9bf0f87088db..cc7ac05aee4ee 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -4,12 +4,12 @@ import numpy as np from pandas._libs import algos, lib -from pandas._libs.tslibs import conversion +from pandas._libs.tslibs import Timedelta, Timestamp, conversion from pandas.compat import PY3, PY36, string_types from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, - PandasExtensionDtype, PeriodDtype, registry) + PandasExtensionDtype, PeriodDtype, TimedeltaDtype, registry) from pandas.core.dtypes.generic import ( ABCCategorical, ABCDateOffset, ABCDatetimeIndex, ABCIndexClass, ABCPeriodArray, ABCPeriodIndex, ABCSeries) @@ -426,9 +426,24 @@ def is_datetime64_dtype(arr_or_dtype): True >>> is_datetime64_dtype([1, 2, 3]) False + >>> is_datetime64_dtype(pd.DatetimeDtype()) + True + >>> is_datetime64_dtype(pd.DatetimeTZDtype(tz="CET")) + False """ + # It's somewhat tricky to support both of the following: + # 1. is_datetime64_dtype(DatetimeDtype()) == True + # 2. is_datetime64_dtype(DatetimeTZDtype()) == False + # because both use `Timestamp` as the `type`. + # So we look at the `dtype` to see if there's a `.tz` attached. 
+ dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype) + try: + dtype = pandas_dtype(dtype) + except (ValueError, TypeError): + dtype = None - return _is_dtype_type(arr_or_dtype, classes(np.datetime64)) + return (_is_dtype_type(arr_or_dtype, classes(np.datetime64, Timestamp)) + and getattr(dtype, 'tz', None) is None) def is_datetime64tz_dtype(arr_or_dtype): @@ -497,7 +512,7 @@ def is_timedelta64_dtype(arr_or_dtype): False """ - return _is_dtype_type(arr_or_dtype, classes(np.timedelta64)) + return _is_dtype_type(arr_or_dtype, classes(np.timedelta64, Timedelta)) def is_period_dtype(arr_or_dtype): @@ -1192,7 +1207,10 @@ def is_timedelta64_ns_dtype(arr_or_dtype): >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64)) False """ - return _is_dtype(arr_or_dtype, lambda dtype: dtype == _TD_DTYPE) + def condition(dtype): + return isinstance(dtype, TimedeltaDtype) or dtype == _TD_DTYPE + + return _is_dtype(arr_or_dtype, condition) def is_datetime_or_timedelta_dtype(arr_or_dtype): @@ -1229,9 +1247,11 @@ def is_datetime_or_timedelta_dtype(arr_or_dtype): >>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.datetime64)) True """ - - return _is_dtype_type( - arr_or_dtype, classes(np.datetime64, np.timedelta64)) + dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype) + return (_is_dtype_type( + arr_or_dtype, classes(np.datetime64, np.timedelta64, + Timestamp, Timedelta)) + and getattr(dtype, 'tz', None) is None) def _is_unorderable_exception(e): diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index f84471c3b04e8..291816522d38a 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -3,10 +3,10 @@ import warnings import numpy as np -import pytz from pandas._libs.interval import Interval -from pandas._libs.tslibs import NaT, Period, Timestamp, timezones +from pandas._libs.tslibs import NaT, Period, Timedelta, Timestamp, timezones +from pandas._libs.tslibs.timezones import tz_compare from pandas.core.dtypes.generic 
import ABCCategoricalIndex, ABCIndexClass @@ -573,9 +573,7 @@ def _is_boolean(self): return is_bool_dtype(self.categories) -@register_extension_dtype -class DatetimeTZDtype(PandasExtensionDtype, ExtensionDtype): - +class _DatetimeDtypeBase(PandasExtensionDtype, ExtensionDtype): """ A np.dtype duck-typed class, suitable for holding a custom datetime with tz dtype. @@ -583,15 +581,110 @@ class DatetimeTZDtype(PandasExtensionDtype, ExtensionDtype): THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.datetime64[ns] """ + _match = None # type: typing.re.Pattern type = Timestamp kind = 'M' str = '|M8[ns]' num = 101 base = np.dtype('M8[ns]') na_value = NaT + + @property + def unit(self): + """The precision of the datetime data.""" + return self._unit + + @classmethod + def construct_array_type(cls): + """ + Return the array type associated with this dtype + + Returns + ------- + type + """ + from pandas.core.arrays import DatetimeArray + return DatetimeArray + + @classmethod + def construct_from_string(cls, string): + """ + Construct a DatetimeTZDtype from a string. + + Parameters + ---------- + string : str + The string alias for this DatetimeTZDtype. + Should be formatted like ``datetime64[ns, <tz>]``, + where ``<tz>`` is the timezone name. + + Examples + -------- + >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]') + datetime64[ns, UTC] + """ + if isinstance(string, compat.string_types): + msg = "Could not construct DatetimeTZDtype from '{}'" + try: + match = cls._match.match(string) + if match: + d = match.groupdict() + return cls(**d) + except Exception: + # TODO(py3): Change this pass to `raise TypeError(msg) from e` + pass + raise TypeError(msg.format(string)) + + raise TypeError("Could not construct DatetimeTZDtype") + + @property + def name(self): + """A string representation of the dtype.""" + return str(self) + + def __hash__(self): + # make myself hashable + # TODO: update this. 
+ return hash(str(self)) + + def __eq__(self, other): + if isinstance(other, compat.string_types): + return other == self.name + + return (isinstance(other, type(self)) and + self.unit == other.unit and + # TODO: figure out why this was needed + # failure in test_get_loc_datetimelike_overlapping + # is something not being normalized? + tz_compare(getattr(self, 'tz', None), + getattr(other, 'tz', None))) + + +class DatetimeDtype(_DatetimeDtypeBase): + # This does not register itself as an ExtensionDtype. + # We found it easier to let pandas_dtype('M8[ns]') continue + # to be np.dtype('M8[ns]') + # Registering as an extension dtype caused issues in places + # like is_dtype_equal, is_datetime*, etc. + _metadata = ('unit',) + _match = re.compile(r"(datetime64|M8)\[ns\]") + + def __init__(self, unit="ns"): + assert unit == "ns" + self._unit = unit + + def __unicode__(self): + return 'datetime64[ns]' + + def __getstate__(self): + # override PandasExtensionDtype.__getstate__ + return self.__dict__ + + +@register_extension_dtype +class DatetimeTZDtype(_DatetimeDtypeBase): _metadata = ('unit', 'tz') _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]") - _cache = {} def __init__(self, unit="ns", tz=None): """ @@ -639,92 +732,86 @@ def __init__(self, unit="ns", tz=None): if tz: tz = timezones.maybe_get_tz(tz) - elif tz is not None: - raise pytz.UnknownTimeZoneError(tz) - elif tz is None: + else: raise TypeError("A 'tz' is required.") self._unit = unit self._tz = tz - @property - def unit(self): - """The precision of the datetime data.""" - return self._unit - @property def tz(self): """The timezone.""" return self._tz - @classmethod - def construct_array_type(cls): - """ - Return the array type associated with this dtype + def __unicode__(self): + return "datetime64[{unit}, {tz}]".format(unit=self.unit, tz=self.tz) - Returns - ------- - type - """ - from pandas.core.arrays import DatetimeArray - return DatetimeArray + def __setstate__(self, state): + # for 
pickle compat. + self._tz = state['tz'] + self._unit = state['unit'] - @classmethod - def construct_from_string(cls, string): - """ - Construct a DatetimeTZDtype from a string. - Parameters - ---------- - string : str - The string alias for this DatetimeTZDtype. - Should be formatted like ``datetime64[ns, <tz>]``, - where ``<tz>`` is the timezone name. +class TimedeltaDtype(PandasExtensionDtype, ExtensionDtype): + _metadata = ('unit',) + _match = re.compile(r"(timedelta64|m8)\[(?P<unit>\w+)\]") + type = Timedelta + kind = 'm' + str = '|m8[ns]' + base = np.dtype('m8[ns]') + na_value = NaT - Examples - -------- - >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]') - datetime64[ns, UTC] - """ - if isinstance(string, compat.string_types): - msg = "Could not construct DatetimeTZDtype from '{}'" - try: - match = cls._match.match(string) - if match: - d = match.groupdict() - return cls(unit=d['unit'], tz=d['tz']) - except Exception: - # TODO(py3): Change this pass to `raise TypeError(msg) from e` - pass - raise TypeError(msg.format(string)) + def __init__(self, unit="ns"): + if isinstance(unit, np.dtype): + if unit != np.dtype("m8[ns]"): + raise ValueError() + unit = "ns" + if unit != "ns": + raise ValueError() - raise TypeError("Could not construct DatetimeTZDtype") + self._unit = unit def __unicode__(self): - return "datetime64[{unit}, {tz}]".format(unit=self.unit, tz=self.tz) + return "timedelta64[ns]" + + def __eq__(self, other): + # TODO: Allow comparison with numpy dtypes? 
+ if isinstance(other, compat.string_types): + return other == self.name or other == 'm8[ns]' + return super(TimedeltaDtype, self).__eq__(other) + + def __hash__(self): + return super(PandasExtensionDtype, self).__hash__() + + def __getstate__(self): + # override PandasExtensionDtype.__getstate__ + return self.__dict__ + + @property + def unit(self): + return self._unit @property def name(self): - """A string representation of the dtype.""" return str(self) - def __hash__(self): - # make myself hashable - # TODO: update this. - return hash(str(self)) - - def __eq__(self, other): - if isinstance(other, compat.string_types): - return other == self.name + @classmethod + def construct_array_type(cls): + from pandas.arrays import TimedeltaArray + return TimedeltaArray - return (isinstance(other, DatetimeTZDtype) and - self.unit == other.unit and - str(self.tz) == str(other.tz)) + @classmethod + def construct_from_string(cls, string): + try: + match = cls._match.match(string) + if match: + d = match.groupdict() + return cls(d['unit']) + except Exception: + pass - def __setstate__(self, state): - # for pickle compat. 
- self._tz = state['tz'] - self._unit = state['unit'] + msg = "Could not construct a TimedeltaArray from '{}'" + raise TypeError(msg.format(string)) @register_extension_dtype diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 309fb3b841461..84e9cecf104b9 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -70,7 +70,9 @@ is_iterator, is_sequence, is_named_tuple) -from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass, ABCMultiIndex +from pandas.core.dtypes.generic import ( + ABCSeries, ABCIndexClass, ABCMultiIndex, ABCDatetimeArray +) from pandas.core.dtypes.missing import isna, notna from pandas.core import algorithms @@ -3643,8 +3645,15 @@ def reindexer(value): value = cast_scalar_to_array(len(self.index), value) value = maybe_cast_to_datetime(value, infer_dtype) + if isinstance(value, ABCDatetimeArray) and not value.tz: + # Ensure that tz-naive data are only stored internally as ndarray. + # This should be unnecessary when sanitize_array and + # maybe_cast_to_datetime are cleaned up. 
+ value = value._data + # return internal types directly if is_extension_type(value) or is_extension_array_dtype(value): + return value # broadcast across multiple columns if necessary diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 5a9bf6c2c6263..f34b06254fce0 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -312,7 +312,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, (dtype is not None and is_timedelta64_dtype(dtype))): from pandas import TimedeltaIndex result = TimedeltaIndex(data, copy=copy, name=name, **kwargs) - if dtype is not None and _o_dtype == dtype: + if dtype is not None and is_object_dtype(dtype): return Index(result.to_pytimedelta(), dtype=_o_dtype) else: return result diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index a4e058160e567..8e8a196a3db5c 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -359,7 +359,9 @@ def __array__(self, dtype=None): @property def dtype(self): - return self._data.dtype + if self.tz: + return self._data.dtype + return _NS_DTYPE @property def tz(self): diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index cbe5ae198838f..b9c8f136eb216 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -6,6 +6,7 @@ from pandas._libs import ( NaT, Timedelta, index as libindex, join as libjoin, lib) +from pandas._libs.properties import cache_readonly import pandas.compat as compat from pandas.util._decorators import Appender, Substitution @@ -17,7 +18,8 @@ from pandas.core.accessor import delegate_names from pandas.core.arrays import datetimelike as dtl -from pandas.core.arrays.timedeltas import TimedeltaArray, _is_convertible_to_td +from pandas.core.arrays.timedeltas import ( + TimedeltaArray, _is_convertible_to_td, _validate_td64_dtype) from pandas.core.base import _shared_docs import pandas.core.common as com from 
pandas.core.indexes.base import Index, _index_shared_docs @@ -235,9 +237,8 @@ def _simple_new(cls, values, name=None, freq=None, dtype=_TD_DTYPE): else: if freq is None: freq = values.freq + _validate_td64_dtype(dtype) assert isinstance(values, TimedeltaArray), type(values) - assert dtype == _TD_DTYPE, dtype - assert values.dtype == 'm8[ns]', values.dtype tdarr = TimedeltaArray._simple_new(values._data, freq=freq) result = object.__new__(cls) @@ -249,6 +250,10 @@ def _simple_new(cls, values, name=None, freq=None, dtype=_TD_DTYPE): result._reset_identity() return result + @cache_readonly + def dtype(self): + return self._data.dtype.base + # ------------------------------------------------------------------- def __setstate__(self, state): diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index df764aa4ba666..9e37e86cb0164 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -26,7 +26,8 @@ is_re, is_re_compilable, is_sparse, is_timedelta64_dtype, pandas_dtype) import pandas.core.dtypes.concat as _concat from pandas.core.dtypes.dtypes import ( - CategoricalDtype, ExtensionDtype, PandasExtensionDtype) + CategoricalDtype, DatetimeDtype, ExtensionDtype, PandasExtensionDtype, + TimedeltaDtype) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, ABCExtensionArray, ABCIndexClass, ABCSeries) @@ -593,6 +594,8 @@ def _astype(self, dtype, copy=False, errors='raise', values=None, # convert dtypes if needed dtype = pandas_dtype(dtype) + if isinstance(dtype, DatetimeDtype): + dtype = _NS_DTYPE # astype processing if is_dtype_equal(self.dtype, dtype): if copy: @@ -1678,6 +1681,7 @@ class ExtensionBlock(NonConsolidatableMixIn, Block): def __init__(self, values, placement, ndim=None): values = self._maybe_coerce_values(values) + assert not isinstance(values.dtype, DatetimeDtype) super(ExtensionBlock, self).__init__(values, placement, ndim) def _maybe_coerce_values(self, values): @@ -2681,6 +2685,10 @@ 
def f(m, v, i): blocks = self.split_and_operate(None, f, False) else: values = f(None, self.values.ravel(), None) + + if isinstance(values, DatetimeArray) and not values.tz: + # ensure that we get a DatetimeBlock. + values = values._data blocks = [make_block(values, ndim=self.ndim, placement=self.mgr_locs)] @@ -3056,7 +3064,10 @@ def get_block_type(values, dtype=None): assert not is_datetime64tz_dtype(values) cls = DatetimeBlock elif is_datetime64tz_dtype(values): - cls = DatetimeTZBlock + if dtype.tz: + cls = DatetimeTZBlock + else: + cls = DatetimeBlock elif is_interval_dtype(dtype) or is_period_dtype(dtype): cls = ObjectValuesExtensionBlock elif is_extension_array_dtype(values): @@ -3085,6 +3096,13 @@ def make_block(values, placement, klass=None, ndim=None, dtype=None, "in a future release.", DeprecationWarning) if klass is None: dtype = dtype or values.dtype + + if isinstance(dtype, (DatetimeDtype, TimedeltaDtype)): + # for DataFrame.__setitem__[scalar] + # this is... not great. + values, dtype = values._data, values.dtype + values = _block_shape(values, ndim) + klass = get_block_type(values, dtype) elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values): diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 7af347a141781..327b88c3b85c9 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -19,12 +19,14 @@ maybe_cast_to_datetime, maybe_cast_to_integer_array, maybe_castable, maybe_convert_platform, maybe_infer_to_datetimelike, maybe_upcast) from pandas.core.dtypes.common import ( - is_categorical_dtype, is_datetime64tz_dtype, is_dtype_equal, + _NS_DTYPE, is_categorical_dtype, is_datetime64tz_dtype, is_dtype_equal, is_extension_array_dtype, is_extension_type, is_float_dtype, is_integer_dtype, is_iterator, is_list_like, is_object_dtype, pandas_dtype) +from pandas.core.dtypes.dtypes import DatetimeDtype from pandas.core.dtypes.generic import ( - ABCDataFrame, 
ABCDatetimeIndex, ABCIndexClass, ABCPandasArray, - ABCPeriodIndex, ABCSeries, ABCTimedeltaIndex) + ABCDataFrame, ABCDatetimeArray, ABCDatetimeIndex, ABCIndexClass, + ABCPandasArray, ABCPeriodIndex, ABCSeries, ABCTimedeltaArray, + ABCTimedeltaIndex) from pandas.core.dtypes.missing import isna from pandas.core import algorithms, common as com @@ -540,6 +542,7 @@ def sanitize_array(data, index, dtype=None, copy=False, Sanitize input data to an ndarray, copy if specified, coerce to the dtype if specified. """ + from pandas.core.dtypes.generic import ABCDatetimeArray if dtype is not None: dtype = pandas_dtype(dtype) @@ -598,6 +601,18 @@ def sanitize_array(data, index, dtype=None, copy=False, if copy: subarr = data.copy() + + # Ensure that we don't allow the following in Internals + # * DatetimeArray[DatetimeDtype] (tz-naive) + # * TimedeltaArray + # For the time being, we only want to allow storing those + # as DatetimeBlock and TimedeltaBlocks. + + if isinstance(subarr, ABCDatetimeArray) and not subarr.tz: + subarr = subarr._data + if isinstance(subarr, ABCTimedeltaArray): + subarr = subarr._data + return subarr elif isinstance(data, (list, tuple)) and len(data) > 0: @@ -624,6 +639,11 @@ def sanitize_array(data, index, dtype=None, copy=False, else: subarr = _try_cast(data, False, dtype, copy, raise_cast_failure) + if isinstance(subarr, ABCDatetimeArray) and not subarr.tz: + subarr = subarr._data + if isinstance(subarr, ABCTimedeltaArray): + subarr = subarr._data + # scalar like, GH if getattr(subarr, 'ndim', 0) == 0: if isinstance(data, list): # pragma: no cover @@ -638,6 +658,10 @@ def sanitize_array(data, index, dtype=None, copy=False, # need to possibly convert the value here value = maybe_cast_to_datetime(value, dtype) + if isinstance(dtype, DatetimeDtype): + # for scalar timestamp / nat + dtype = _NS_DTYPE + subarr = construct_1d_arraylike_from_scalar( value, len(index), dtype) @@ -678,6 +702,9 @@ def sanitize_array(data, index, dtype=None, copy=False, except 
IncompatibleFrequency: pass + if isinstance(data, ABCDatetimeArray): + assert data.tz + return subarr @@ -702,6 +729,8 @@ def _try_cast(arr, take_fast_path, dtype, copy, raise_cast_failure): isinstance(subarr, np.ndarray))): subarr = construct_1d_object_array_from_listlike(subarr) elif not is_extension_type(subarr): + if isinstance(dtype, DatetimeDtype): + dtype = _NS_DTYPE subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy) except (ValueError, TypeError): @@ -718,4 +747,19 @@ def _try_cast(arr, take_fast_path, dtype, copy, raise_cast_failure): raise else: subarr = np.array(arr, dtype=object, copy=copy) + return subarr + + +def _ensure_dta_tda_ndarray(arr): + """Ensure that an ndarray is returned for specific cases. + + * tz-naive DatatimeArray + * TimedeltaArray + """ + if isinstance(arr, ABCDatetimeArray): + if not arr.tz: + arr = arr._data + if isinstance(arr, ABCTimedeltaArray): + arr = arr._data + return arr diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index cafd3a9915fa0..ea75384d76804 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -144,6 +144,7 @@ def f(values, axis=None, skipna=True, **kwds): def _bn_ok_dtype(dt, name): # Bottleneck chokes on datetime64 + # import pdb; pdb.set_trace() if (not is_object_dtype(dt) and not (is_datetime_or_timedelta_dtype(dt) or is_datetime64tz_dtype(dt))): diff --git a/pandas/core/series.py b/pandas/core/series.py index eb412add7bbbb..6e4f037a2bea3 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -22,7 +22,7 @@ is_iterator, is_list_like, is_scalar, is_string_like, is_timedelta64_dtype) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeArray, ABCDatetimeIndex, ABCSeries, - ABCSparseArray, ABCSparseSeries) + ABCSparseArray, ABCSparseSeries, ABCTimedeltaArray) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, remove_na_arraylike) @@ -260,6 +260,9 @@ def __init__(self, data=None, index=None, dtype=None, name=None, else: data 
= sanitize_array(data, index, dtype, copy, raise_cast_failure=True) + if isinstance(data, ABCDatetimeArray): + assert data.tz + assert not isinstance(data, ABCTimedeltaArray) data = SingleBlockManager(data, index, fastpath=True) diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 07cf358c765b3..8e3f116173dc6 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -48,7 +48,7 @@ class TestPDApi(Base): 'TimedeltaIndex', 'Timestamp', 'Interval', 'IntervalIndex', 'IntervalArray', 'CategoricalDtype', 'PeriodDtype', 'IntervalDtype', - 'DatetimeTZDtype', + 'DatetimeDtype', 'DatetimeTZDtype', 'TimedeltaDtype', 'Int8Dtype', 'Int16Dtype', 'Int32Dtype', 'Int64Dtype', 'UInt8Dtype', 'UInt16Dtype', 'UInt32Dtype', 'UInt64Dtype', ] diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index 62e96fd39a759..ea194df7fea86 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -201,16 +201,29 @@ def test_is_datetime64_dtype(): assert not com.is_datetime64_dtype([1, 2, 3]) assert not com.is_datetime64_dtype(np.array([], dtype=int)) + tzd = DatetimeTZDtype(tz="CET") + assert not com.is_datetime64_dtype(tzd) + assert not com.is_datetime64_dtype( + pd.arrays.DatetimeArray._from_sequence(['2000'], dtype=tzd) + ) + assert com.is_datetime64_dtype(np.datetime64) assert com.is_datetime64_dtype(np.array([], dtype=np.datetime64)) + assert com.is_datetime64_dtype(pd.DatetimeDtype()) + assert com.is_datetime64_dtype( + pd.arrays.DatetimeArray._from_sequence(['2000']) + ) def test_is_datetime64tz_dtype(): assert not com.is_datetime64tz_dtype(object) assert not com.is_datetime64tz_dtype([1, 2, 3]) assert not com.is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) - assert com.is_datetime64tz_dtype(pd.DatetimeIndex(['2000'], - tz="US/Eastern")) + idx = pd.DatetimeIndex(['2000'], tz="US/Eastern") + assert com.is_datetime64tz_dtype(idx) + assert com.is_datetime64tz_dtype(idx.dtype) + 
assert com.is_datetime64tz_dtype(idx._data) + assert com.is_datetime64tz_dtype(idx._data.dtype) def test_is_timedelta64_dtype(): @@ -225,7 +238,13 @@ def test_is_timedelta64_dtype(): assert com.is_timedelta64_dtype(np.timedelta64) assert com.is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]")) - assert com.is_timedelta64_dtype(pd.to_timedelta(['0 days', '1 days'])) + + tdi = pd.to_timedelta(['0 days', '1 days']) + + assert com.is_timedelta64_dtype(tdi) + assert com.is_timedelta64_dtype(tdi.dtype) + assert com.is_timedelta64_dtype(tdi._data) + assert com.is_timedelta64_dtype(tdi._data.dtype) def test_is_period_dtype(): @@ -424,6 +443,18 @@ def test_is_datetime_or_timedelta_dtype(): assert com.is_datetime_or_timedelta_dtype( np.array([], dtype=np.datetime64)) + idx = pd.date_range("2000", periods=2) + assert com.is_datetime_or_timedelta_dtype(idx) + assert com.is_datetime_or_timedelta_dtype(idx.dtype) + assert com.is_datetime_or_timedelta_dtype(idx._data) + assert com.is_datetime_or_timedelta_dtype(idx._data.dtype) + + idx = pd.timedelta_range("1H", periods=2) + assert com.is_datetime_or_timedelta_dtype(idx) + assert com.is_datetime_or_timedelta_dtype(idx.dtype) + assert com.is_datetime_or_timedelta_dtype(idx._data) + assert com.is_datetime_or_timedelta_dtype(idx._data.dtype) + def test_is_numeric_v_string_like(): assert not com.is_numeric_v_string_like(1, 1) diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 0fe0a845f5129..55f1591bbb7f3 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -825,6 +825,62 @@ def test_update_dtype_errors(self, bad_dtype): dtype.update_dtype(bad_dtype) +class TestDatetimeDtype(Base): + def create(self): + return pd.DatetimeDtype() + + def test_equality(self): + a = pd.DatetimeDtype() + b = pd.DatetimeDtype() + assert a == b + + c = np.dtype("int8") + assert a != c + # TODO: equality to 'M8[ns]'? np.dtype('M8[ns]')? 
+ + def test_construct_from_string(self): + result = pd.DatetimeDtype.construct_from_string('datetime64[ns]') + expected = pd.DatetimeDtype() + assert result == expected + + def test_attrs(self): + assert self.dtype.unit == 'ns' + assert self.dtype.kind == 'M' + assert str(self.dtype) == 'datetime64[ns]' + assert self.dtype.base == np.dtype('M8[ns]') + assert self.dtype.type is pd.Timestamp + assert self.dtype.na_value is pd.NaT + assert self.dtype.construct_array_type() is pd.arrays.DatetimeArray + + +class TestTimedeltaDtype(Base): + def create(self): + return pd.TimedeltaDtype() + + def test_equality(self): + a = pd.TimedeltaDtype() + b = pd.TimedeltaDtype() + assert a == b + + c = np.dtype("int") + assert a != c + # TODO: equality to 'm8[ns]'? np.dtype('m8[ns]')? + + def test_construct_from_string(self): + result = pd.DatetimeDtype.construct_from_string('datetime64[ns]') + expected = pd.DatetimeDtype() + assert result == expected + + def test_attrs(self): + assert self.dtype.unit == 'ns' + assert self.dtype.kind == 'm' + assert str(self.dtype) == 'timedelta64[ns]' + assert self.dtype.base == np.dtype('m8[ns]') + assert self.dtype.type is pd.Timedelta + assert self.dtype.na_value is pd.NaT + assert self.dtype.construct_array_type() is pd.arrays.TimedeltaArray + + @pytest.mark.parametrize('dtype', [ CategoricalDtype, IntervalDtype, diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py index 00ad35bf6a924..c06e688f6e95e 100644 --- a/pandas/tests/extension/test_datetime.py +++ b/pandas/tests/extension/test_datetime.py @@ -1,237 +1,29 @@ +""" +Extension Tests for *tz-naive* arrays.DatetimeArray. + +Currently, we only run the Dtype tests, as we do not allow a +tz-naive DatetimeArray inside internals. 
+""" import numpy as np import pytest -from pandas.core.dtypes.dtypes import DatetimeTZDtype - import pandas as pd -from pandas.core.arrays import DatetimeArray from pandas.tests.extension import base -@pytest.fixture(params=["US/Central"]) -def dtype(request): - return DatetimeTZDtype(unit="ns", tz=request.param) - - @pytest.fixture -def data(dtype): - data = DatetimeArray(pd.date_range("2000", periods=100, tz=dtype.tz), - dtype=dtype) - return data - - -@pytest.fixture -def data_missing(dtype): - return DatetimeArray( - np.array(['NaT', '2000-01-01'], dtype='datetime64[ns]'), - dtype=dtype - ) - - -@pytest.fixture -def data_for_sorting(dtype): - a = pd.Timestamp('2000-01-01') - b = pd.Timestamp('2000-01-02') - c = pd.Timestamp('2000-01-03') - return DatetimeArray(np.array([b, c, a], dtype='datetime64[ns]'), - dtype=dtype) - - -@pytest.fixture -def data_missing_for_sorting(dtype): - a = pd.Timestamp('2000-01-01') - b = pd.Timestamp('2000-01-02') - return DatetimeArray(np.array([b, 'NaT', a], dtype='datetime64[ns]'), - dtype=dtype) - - -@pytest.fixture -def data_for_grouping(dtype): - """ - Expected to be like [B, B, NA, NA, A, A, B, C] - - Where A < B < C and NA is missing - """ - a = pd.Timestamp('2000-01-01') - b = pd.Timestamp('2000-01-02') - c = pd.Timestamp('2000-01-03') - na = 'NaT' - return DatetimeArray(np.array([b, b, na, na, a, a, b, c], - dtype='datetime64[ns]'), - dtype=dtype) +def dtype(): + return pd.DatetimeDtype() @pytest.fixture -def na_cmp(): - def cmp(a, b): - return a is pd.NaT and a is b - return cmp - - -@pytest.fixture -def na_value(): - return pd.NaT - - -# ---------------------------------------------------------------------------- -class BaseDatetimeTests(object): - pass - - -# ---------------------------------------------------------------------------- -# Tests -class TestDatetimeDtype(BaseDatetimeTests, base.BaseDtypeTests): - pass - - -class TestConstructors(BaseDatetimeTests, base.BaseConstructorsTests): - pass - - -class 
TestGetitem(BaseDatetimeTests, base.BaseGetitemTests): - pass - - -class TestMethods(BaseDatetimeTests, base.BaseMethodsTests): - @pytest.mark.skip(reason="Incorrect expected") - def test_value_counts(self, all_data, dropna): - pass - - def test_combine_add(self, data_repeated): - # Timestamp.__add__(Timestamp) not defined - pass - - -class TestInterface(BaseDatetimeTests, base.BaseInterfaceTests): - - def test_array_interface(self, data): - if data.tz: - # np.asarray(DTA) is currently always tz-naive. - pytest.skip("GH-23569") - else: - super(TestInterface, self).test_array_interface(data) - - -class TestArithmeticOps(BaseDatetimeTests, base.BaseArithmeticOpsTests): - implements = {'__sub__', '__rsub__'} - - def test_arith_series_with_scalar(self, data, all_arithmetic_operators): - if all_arithmetic_operators in self.implements: - s = pd.Series(data) - self.check_opname(s, all_arithmetic_operators, s.iloc[0], - exc=None) - else: - # ... but not the rest. - super(TestArithmeticOps, self).test_arith_series_with_scalar( - data, all_arithmetic_operators - ) - - def test_add_series_with_extension_array(self, data): - # Datetime + Datetime not implemented - s = pd.Series(data) - msg = 'cannot add DatetimeArray and DatetimeArray' - with pytest.raises(TypeError, match=msg): - s + data - - def test_arith_series_with_array(self, data, all_arithmetic_operators): - if all_arithmetic_operators in self.implements: - s = pd.Series(data) - self.check_opname(s, all_arithmetic_operators, s.iloc[0], - exc=None) - else: - # ... but not the rest. - super(TestArithmeticOps, self).test_arith_series_with_scalar( - data, all_arithmetic_operators - ) - - def test_error(self, data, all_arithmetic_operators): - pass - - @pytest.mark.xfail(reason="different implementation", strict=False) - def test_direct_arith_with_series_returns_not_implemented(self, data): - # Right now, we have trouble with this. 
Returning NotImplemented - # fails other tests like - # tests/arithmetic/test_datetime64::TestTimestampSeriesArithmetic:: - # test_dt64_seris_add_intlike - return super( - TestArithmeticOps, - self - ).test_direct_arith_with_series_returns_not_implemented(data) - - -class TestCasting(BaseDatetimeTests, base.BaseCastingTests): - pass - - -class TestComparisonOps(BaseDatetimeTests, base.BaseComparisonOpsTests): - - def _compare_other(self, s, data, op_name, other): - # the base test is not appropriate for us. We raise on comparison - # with (some) integers, depending on the value. - pass - - @pytest.mark.xfail(reason="different implementation", strict=False) - def test_direct_arith_with_series_returns_not_implemented(self, data): - return super( - TestComparisonOps, - self - ).test_direct_arith_with_series_returns_not_implemented(data) - - -class TestMissing(BaseDatetimeTests, base.BaseMissingTests): - pass - - -class TestReshaping(BaseDatetimeTests, base.BaseReshapingTests): - - @pytest.mark.skip(reason="We have DatetimeTZBlock") - def test_concat(self, data, in_frame): - pass - - def test_concat_mixed_dtypes(self, data): - # concat(Series[datetimetz], Series[category]) uses a - # plain np.array(values) on the DatetimeArray, which - # drops the tz. - super(TestReshaping, self).test_concat_mixed_dtypes(data) - - @pytest.mark.parametrize("obj", ["series", "frame"]) - def test_unstack(self, obj): - # GH-13287: can't use base test, since building the expected fails. 
- data = DatetimeArray._from_sequence(['2000', '2001', '2002', '2003'], - tz='US/Central') - index = pd.MultiIndex.from_product(([['A', 'B'], ['a', 'b']]), - names=['a', 'b']) - - if obj == "series": - ser = pd.Series(data, index=index) - expected = pd.DataFrame({ - "A": data.take([0, 1]), - "B": data.take([2, 3]) - }, index=pd.Index(['a', 'b'], name='b')) - expected.columns.name = 'a' - - else: - ser = pd.DataFrame({"A": data, "B": data}, index=index) - expected = pd.DataFrame( - {("A", "A"): data.take([0, 1]), - ("A", "B"): data.take([2, 3]), - ("B", "A"): data.take([0, 1]), - ("B", "B"): data.take([2, 3])}, - index=pd.Index(['a', 'b'], name='b') - ) - expected.columns.names = [None, 'a'] - - result = ser.unstack(0) - self.assert_equal(result, expected) - - -class TestSetitem(BaseDatetimeTests, base.BaseSetitemTests): - pass +def data(): + return pd.arrays.DatetimeArray(np.arange(0, 100, dtype='M8[ns]')) -class TestGroupby(BaseDatetimeTests, base.BaseGroupbyTests): +class BaseTimedeltaTests(object): pass -class TestPrinting(BaseDatetimeTests, base.BasePrintingTests): +class TestDtype(BaseTimedeltaTests, base.BaseDtypeTests): pass diff --git a/pandas/tests/extension/test_datetimetz.py b/pandas/tests/extension/test_datetimetz.py new file mode 100644 index 0000000000000..00ad35bf6a924 --- /dev/null +++ b/pandas/tests/extension/test_datetimetz.py @@ -0,0 +1,237 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import DatetimeTZDtype + +import pandas as pd +from pandas.core.arrays import DatetimeArray +from pandas.tests.extension import base + + +@pytest.fixture(params=["US/Central"]) +def dtype(request): + return DatetimeTZDtype(unit="ns", tz=request.param) + + +@pytest.fixture +def data(dtype): + data = DatetimeArray(pd.date_range("2000", periods=100, tz=dtype.tz), + dtype=dtype) + return data + + +@pytest.fixture +def data_missing(dtype): + return DatetimeArray( + np.array(['NaT', '2000-01-01'], dtype='datetime64[ns]'), + dtype=dtype + ) + + 
+@pytest.fixture +def data_for_sorting(dtype): + a = pd.Timestamp('2000-01-01') + b = pd.Timestamp('2000-01-02') + c = pd.Timestamp('2000-01-03') + return DatetimeArray(np.array([b, c, a], dtype='datetime64[ns]'), + dtype=dtype) + + +@pytest.fixture +def data_missing_for_sorting(dtype): + a = pd.Timestamp('2000-01-01') + b = pd.Timestamp('2000-01-02') + return DatetimeArray(np.array([b, 'NaT', a], dtype='datetime64[ns]'), + dtype=dtype) + + +@pytest.fixture +def data_for_grouping(dtype): + """ + Expected to be like [B, B, NA, NA, A, A, B, C] + + Where A < B < C and NA is missing + """ + a = pd.Timestamp('2000-01-01') + b = pd.Timestamp('2000-01-02') + c = pd.Timestamp('2000-01-03') + na = 'NaT' + return DatetimeArray(np.array([b, b, na, na, a, a, b, c], + dtype='datetime64[ns]'), + dtype=dtype) + + +@pytest.fixture +def na_cmp(): + def cmp(a, b): + return a is pd.NaT and a is b + return cmp + + +@pytest.fixture +def na_value(): + return pd.NaT + + +# ---------------------------------------------------------------------------- +class BaseDatetimeTests(object): + pass + + +# ---------------------------------------------------------------------------- +# Tests +class TestDatetimeDtype(BaseDatetimeTests, base.BaseDtypeTests): + pass + + +class TestConstructors(BaseDatetimeTests, base.BaseConstructorsTests): + pass + + +class TestGetitem(BaseDatetimeTests, base.BaseGetitemTests): + pass + + +class TestMethods(BaseDatetimeTests, base.BaseMethodsTests): + @pytest.mark.skip(reason="Incorrect expected") + def test_value_counts(self, all_data, dropna): + pass + + def test_combine_add(self, data_repeated): + # Timestamp.__add__(Timestamp) not defined + pass + + +class TestInterface(BaseDatetimeTests, base.BaseInterfaceTests): + + def test_array_interface(self, data): + if data.tz: + # np.asarray(DTA) is currently always tz-naive. 
+ pytest.skip("GH-23569") + else: + super(TestInterface, self).test_array_interface(data) + + +class TestArithmeticOps(BaseDatetimeTests, base.BaseArithmeticOpsTests): + implements = {'__sub__', '__rsub__'} + + def test_arith_series_with_scalar(self, data, all_arithmetic_operators): + if all_arithmetic_operators in self.implements: + s = pd.Series(data) + self.check_opname(s, all_arithmetic_operators, s.iloc[0], + exc=None) + else: + # ... but not the rest. + super(TestArithmeticOps, self).test_arith_series_with_scalar( + data, all_arithmetic_operators + ) + + def test_add_series_with_extension_array(self, data): + # Datetime + Datetime not implemented + s = pd.Series(data) + msg = 'cannot add DatetimeArray and DatetimeArray' + with pytest.raises(TypeError, match=msg): + s + data + + def test_arith_series_with_array(self, data, all_arithmetic_operators): + if all_arithmetic_operators in self.implements: + s = pd.Series(data) + self.check_opname(s, all_arithmetic_operators, s.iloc[0], + exc=None) + else: + # ... but not the rest. + super(TestArithmeticOps, self).test_arith_series_with_scalar( + data, all_arithmetic_operators + ) + + def test_error(self, data, all_arithmetic_operators): + pass + + @pytest.mark.xfail(reason="different implementation", strict=False) + def test_direct_arith_with_series_returns_not_implemented(self, data): + # Right now, we have trouble with this. Returning NotImplemented + # fails other tests like + # tests/arithmetic/test_datetime64::TestTimestampSeriesArithmetic:: + # test_dt64_seris_add_intlike + return super( + TestArithmeticOps, + self + ).test_direct_arith_with_series_returns_not_implemented(data) + + +class TestCasting(BaseDatetimeTests, base.BaseCastingTests): + pass + + +class TestComparisonOps(BaseDatetimeTests, base.BaseComparisonOpsTests): + + def _compare_other(self, s, data, op_name, other): + # the base test is not appropriate for us. We raise on comparison + # with (some) integers, depending on the value. 
+ pass + + @pytest.mark.xfail(reason="different implementation", strict=False) + def test_direct_arith_with_series_returns_not_implemented(self, data): + return super( + TestComparisonOps, + self + ).test_direct_arith_with_series_returns_not_implemented(data) + + +class TestMissing(BaseDatetimeTests, base.BaseMissingTests): + pass + + +class TestReshaping(BaseDatetimeTests, base.BaseReshapingTests): + + @pytest.mark.skip(reason="We have DatetimeTZBlock") + def test_concat(self, data, in_frame): + pass + + def test_concat_mixed_dtypes(self, data): + # concat(Series[datetimetz], Series[category]) uses a + # plain np.array(values) on the DatetimeArray, which + # drops the tz. + super(TestReshaping, self).test_concat_mixed_dtypes(data) + + @pytest.mark.parametrize("obj", ["series", "frame"]) + def test_unstack(self, obj): + # GH-13287: can't use base test, since building the expected fails. + data = DatetimeArray._from_sequence(['2000', '2001', '2002', '2003'], + tz='US/Central') + index = pd.MultiIndex.from_product(([['A', 'B'], ['a', 'b']]), + names=['a', 'b']) + + if obj == "series": + ser = pd.Series(data, index=index) + expected = pd.DataFrame({ + "A": data.take([0, 1]), + "B": data.take([2, 3]) + }, index=pd.Index(['a', 'b'], name='b')) + expected.columns.name = 'a' + + else: + ser = pd.DataFrame({"A": data, "B": data}, index=index) + expected = pd.DataFrame( + {("A", "A"): data.take([0, 1]), + ("A", "B"): data.take([2, 3]), + ("B", "A"): data.take([0, 1]), + ("B", "B"): data.take([2, 3])}, + index=pd.Index(['a', 'b'], name='b') + ) + expected.columns.names = [None, 'a'] + + result = ser.unstack(0) + self.assert_equal(result, expected) + + +class TestSetitem(BaseDatetimeTests, base.BaseSetitemTests): + pass + + +class TestGroupby(BaseDatetimeTests, base.BaseGroupbyTests): + pass + + +class TestPrinting(BaseDatetimeTests, base.BasePrintingTests): + pass diff --git a/pandas/tests/extension/test_timedelta.py b/pandas/tests/extension/test_timedelta.py new file mode 
100644 index 0000000000000..2fe67a76f3ad2 --- /dev/null +++ b/pandas/tests/extension/test_timedelta.py @@ -0,0 +1,29 @@ +""" +Extension Tests for arrays.TimedeltaArray. + +Currently, we only run the Dtype tests, as we do not allow a +TimedeltaArray inside internals. +""" +import numpy as np +import pytest + +import pandas as pd +from pandas.tests.extension import base + + +@pytest.fixture +def dtype(): + return pd.TimedeltaDtype() + + +@pytest.fixture +def data(): + return pd.arrays.TimedeltaArray(np.arange(0, 100, dtype='m8[ns]')) + + +class BaseTimedeltaTests(object): + pass + + +class TestDtype(BaseTimedeltaTests, base.BaseDtypeTests): + pass
Closes https://github.com/pandas-dev/pandas/issues/24662 TODO: - [ ] Dedicated tests for new is_<foo> types - [ ] Run the extension dtype tests for new dtypes - [ ] (maybe?) see if the new EA tests pass for the new arrays. Tricky, since they aren't allowed internally. g - [ ] failing dtype validation tests This is going to fail. Going offline for a bit, but putting this up right now. I'm going to clean up the changes in `internals` and the series constructor.
https://api.github.com/repos/pandas-dev/pandas/pulls/24674
2019-01-08T15:49:30Z
2019-07-29T15:21:36Z
null
2019-07-29T15:21:36Z
REF: move templated index.pyx code to non-template
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index c919086701536..8cea529fbb07e 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -226,7 +226,13 @@ cdef class IndexEngine: return self.vgetter() def _call_monotonic(self, values): - raise NotImplementedError + return algos.is_monotonic(values, timelike=False) + + def get_backfill_indexer(self, other, limit=None): + return algos.backfill(self._get_index_values(), other, limit=limit) + + def get_pad_indexer(self, other, limit=None): + return algos.pad(self._get_index_values(), other, limit=limit) cdef _make_hash_table(self, n): raise NotImplementedError @@ -371,6 +377,14 @@ cdef Py_ssize_t _bin_search(ndarray values, object val) except -1: return mid + 1 +cdef class ObjectEngine(IndexEngine): + """ + Index Engine for use with object-dtype Index, namely the base class Index + """ + cdef _make_hash_table(self, n): + return _hash.PyObjectHashTable(n) + + cdef class DatetimeEngine(Int64Engine): cdef _get_box_dtype(self): diff --git a/pandas/_libs/index_class_helper.pxi.in b/pandas/_libs/index_class_helper.pxi.in index 6383c1534fb44..3c9a096e7ecc0 100644 --- a/pandas/_libs/index_class_helper.pxi.in +++ b/pandas/_libs/index_class_helper.pxi.in @@ -21,7 +21,6 @@ dtypes = [('Float64', 'float64', 'float64_t', 'Float64', 'float64'), ('UInt32', 'uint32', 'uint32_t', 'UInt64', 'uint64'), ('UInt16', 'uint16', 'uint16_t', 'UInt64', 'uint64'), ('UInt8', 'uint8', 'uint8_t', 'UInt64', 'uint64'), - ('Object', 'object', 'object', 'PyObject', 'object'), ] }} @@ -30,30 +29,15 @@ dtypes = [('Float64', 'float64', 'float64_t', 'Float64', 'float64'), cdef class {{name}}Engine(IndexEngine): - def _call_monotonic(self, values): - return algos.is_monotonic(values, timelike=False) - - def get_backfill_indexer(self, other, limit=None): - return algos.backfill(self._get_index_values(), other, limit=limit) - - def get_pad_indexer(self, other, limit=None): - return algos.pad(self._get_index_values(), other, 
limit=limit) - cdef _make_hash_table(self, n): return _hash.{{hashtable_name}}HashTable(n) - {{if name not in {'Float64', 'Float32', 'Object'} }} + {{if name not in {'Float64', 'Float32'} }} cdef _check_type(self, object val): - hash(val) - if util.is_bool_object(val): - raise KeyError(val) - elif util.is_float_object(val): - raise KeyError(val) - elif not util.is_integer_object(val): + if not util.is_integer_object(val): raise KeyError(val) {{endif}} - {{if name != 'Object'}} cpdef _call_map_locations(self, values): # self.mapping is of type {{hashtable_name}}HashTable, # so convert dtype of values @@ -87,6 +71,4 @@ cdef class {{name}}Engine(IndexEngine): raise KeyError(val) - {{endif}} - {{endfor}}
Following cleanup of libalgos, we can get a little bit of code out of the tempita file.
https://api.github.com/repos/pandas-dev/pandas/pulls/24669
2019-01-08T03:28:36Z
2019-01-08T19:49:46Z
2019-01-08T19:49:46Z
2019-01-08T21:26:57Z
DOC: Removed duplicate doc line
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7bbbdd70e062e..a09f20b5453ff 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6217,8 +6217,6 @@ def _gotitem(self, array, e.g., ``numpy.mean(arr_2d)`` as opposed to ``numpy.mean(arr_2d, axis=0)``. - `agg` is an alias for `aggregate`. Use the alias. - See Also -------- DataFrame.apply : Perform any type of operations.
This line of documentation is already included in the parent shared docs. So the line appears twice. See the screenshot below from [the Dataframe.aggregate page](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.aggregate.html). My patch cleans that up. ![screenshot from 2019-01-07 17-00-12](https://user-images.githubusercontent.com/9993/50803035-ca04fa00-129d-11e9-8078-a0a3d9a55400.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/24668
2019-01-08T01:01:01Z
2019-01-26T15:14:36Z
null
2019-01-26T15:14:36Z
Ensure TDA.__init__ validates freq
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 1ec37c9f228a6..47b3f93f88b78 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -15,8 +15,8 @@ from pandas.util._decorators import Appender from pandas.core.dtypes.common import ( - _NS_DTYPE, _TD_DTYPE, ensure_int64, is_datetime64_dtype, is_float_dtype, - is_integer_dtype, is_list_like, is_object_dtype, is_scalar, + _NS_DTYPE, _TD_DTYPE, ensure_int64, is_datetime64_dtype, is_dtype_equal, + is_float_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_scalar, is_string_dtype, is_timedelta64_dtype, is_timedelta64_ns_dtype, pandas_dtype) from pandas.core.dtypes.dtypes import DatetimeTZDtype @@ -134,55 +134,39 @@ def dtype(self): _attributes = ["freq"] def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False): - if isinstance(values, (ABCSeries, ABCIndexClass)): - values = values._values - - if isinstance(values, type(self)): - values, freq, freq_infer = extract_values_freq(values, freq) - - if not isinstance(values, np.ndarray): - msg = ( + if not hasattr(values, "dtype"): + raise ValueError( "Unexpected type '{}'. 'values' must be a TimedeltaArray " "ndarray, or Series or Index containing one of those." - ) - raise ValueError(msg.format(type(values).__name__)) - - if values.dtype == 'i8': - # for compat with datetime/timedelta/period shared methods, - # we can sometimes get here with int64 values. These represent - # nanosecond UTC (or tz-naive) unix timestamps - values = values.view(_TD_DTYPE) - - if values.dtype != _TD_DTYPE: - raise TypeError(_BAD_DTYPE.format(dtype=values.dtype)) - - try: - dtype_mismatch = dtype != _TD_DTYPE - except TypeError: - raise TypeError(_BAD_DTYPE.format(dtype=dtype)) - else: - if dtype_mismatch: - raise TypeError(_BAD_DTYPE.format(dtype=dtype)) - + .format(type(values).__name__)) if freq == "infer": - msg = ( + raise ValueError( "Frequency inference not allowed in TimedeltaArray.__init__. 
" - "Use 'pd.array()' instead." - ) - raise ValueError(msg) + "Use 'pd.array()' instead.") - if copy: - values = values.copy() - if freq: - freq = to_offset(freq) + if dtype is not None and not is_dtype_equal(dtype, _TD_DTYPE): + raise TypeError("dtype {dtype} cannot be converted to " + "timedelta64[ns]".format(dtype=dtype)) + + if values.dtype == 'i8': + values = values.view('timedelta64[ns]') - self._data = values - self._dtype = dtype - self._freq = freq + result = type(self)._from_sequence(values, dtype=dtype, + copy=copy, freq=freq) + self._data = result._data + self._freq = result._freq + self._dtype = result._dtype @classmethod def _simple_new(cls, values, freq=None, dtype=_TD_DTYPE): - return cls(values, dtype=dtype, freq=freq) + assert dtype == _TD_DTYPE, dtype + assert isinstance(values, np.ndarray), type(values) + + result = object.__new__(cls) + result._data = values.view(_TD_DTYPE) + result._freq = to_offset(freq) + result._dtype = _TD_DTYPE + return result @classmethod def _from_sequence(cls, data, dtype=_TD_DTYPE, copy=False, @@ -860,17 +844,17 @@ def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"): data = data._data # Convert whatever we have into timedelta64[ns] dtype - if is_object_dtype(data) or is_string_dtype(data): + if is_object_dtype(data.dtype) or is_string_dtype(data.dtype): # no need to make a copy, need to convert if string-dtyped data = objects_to_td64ns(data, unit=unit, errors=errors) copy = False - elif is_integer_dtype(data): + elif is_integer_dtype(data.dtype): # treat as multiples of the given unit data, copy_made = ints_to_td64ns(data, unit=unit) copy = copy and not copy_made - elif is_float_dtype(data): + elif is_float_dtype(data.dtype): # treat as multiples of the given unit. If after converting to nanos, # there are fractional components left, these are truncated # (i.e. 
NOT rounded) @@ -880,7 +864,7 @@ def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"): data[mask] = iNaT copy = False - elif is_timedelta64_dtype(data): + elif is_timedelta64_dtype(data.dtype): if data.dtype != _TD_DTYPE: # non-nano unit # TODO: watch out for overflows @@ -998,18 +982,3 @@ def _generate_regular_range(start, end, periods, offset): data = np.arange(b, e, stride, dtype=np.int64) return data - - -def extract_values_freq(arr, freq): - # type: (TimedeltaArray, Offset) -> Tuple[ndarray, Offset, bool] - freq_infer = False - if freq is None: - freq = arr.freq - elif freq and arr.freq: - freq = to_offset(freq) - freq, freq_infer = dtl.validate_inferred_freq( - freq, arr.freq, - freq_infer=False - ) - values = arr._data - return values, freq, freq_infer diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index b9d6b8da2cada..893926cc076ab 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -233,12 +233,14 @@ def _simple_new(cls, values, name=None, freq=None, dtype=_TD_DTYPE): if not isinstance(values, TimedeltaArray): values = TimedeltaArray._simple_new(values, dtype=dtype, freq=freq) + else: + if freq is None: + freq = values.freq assert isinstance(values, TimedeltaArray), type(values) assert dtype == _TD_DTYPE, dtype assert values.dtype == 'm8[ns]', values.dtype - freq = to_offset(freq) - tdarr = TimedeltaArray._simple_new(values, freq=freq) + tdarr = TimedeltaArray._simple_new(values._data, freq=freq) result = object.__new__(cls) result._data = tdarr result.name = name diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py index 481350640e1a6..af23b2467fcdf 100644 --- a/pandas/tests/arrays/test_timedeltas.py +++ b/pandas/tests/arrays/test_timedeltas.py @@ -9,6 +9,15 @@ class TestTimedeltaArrayConstructor(object): + def test_freq_validation(self): + # ensure that the public constructor cannot create an invalid instance + arr = 
np.array([0, 0, 1], dtype=np.int64) * 3600 * 10**9 + + msg = ("Inferred frequency None from passed values does not " + "conform to passed frequency D") + with pytest.raises(ValueError, match=msg): + TimedeltaArray(arr.view('timedelta64[ns]'), freq="D") + def test_non_array_raises(self): with pytest.raises(ValueError, match='list'): TimedeltaArray([1, 2, 3]) @@ -34,7 +43,7 @@ def test_incorrect_dtype_raises(self): def test_copy(self): data = np.array([1, 2, 3], dtype='m8[ns]') arr = TimedeltaArray(data, copy=False) - assert arr._data is data + assert arr._data.base is data arr = TimedeltaArray(data, copy=True) assert arr._data is not data
Users should not be able to construct invalid instances with the public constructor. De-duplicates some code.
https://api.github.com/repos/pandas-dev/pandas/pulls/24666
2019-01-07T23:36:52Z
2019-01-09T16:21:51Z
2019-01-09T16:21:50Z
2019-01-09T18:13:24Z
Have DTA._simple_new take dtype instead of tz
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 2de22e062b29b..cfb697b3c357a 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -296,12 +296,11 @@ def _round(self, freq, mode, ambiguous, nonexistent): result = round_nsint64(values, mode, freq) result = self._maybe_mask_results(result, fill_value=NaT) - attribs = self._get_attributes_dict() - attribs['freq'] = None - if 'tz' in attribs: - attribs['tz'] = None + dtype = self.dtype + if is_datetime64tz_dtype(self): + dtype = None return self._ensure_localized( - self._simple_new(result, **attribs), ambiguous, nonexistent + self._simple_new(result, dtype=dtype), ambiguous, nonexistent ) @Appender((_round_doc + _round_example).format(op="round")) @@ -434,8 +433,6 @@ def __getitem__(self, key): else: key = lib.maybe_booleans_to_slice(key.view(np.uint8)) - attribs = self._get_attributes_dict() - is_period = is_period_dtype(self) if is_period: freq = self.freq @@ -451,17 +448,15 @@ def __getitem__(self, key): # should preserve `freq` attribute freq = self.freq - attribs['freq'] = freq - result = getitem(key) if result.ndim > 1: # To support MPL which performs slicing with 2 dim # even though it only has 1 dim by definition if is_period: - return self._simple_new(result, **attribs) + return self._simple_new(result, dtype=self.dtype, freq=freq) return result - return self._simple_new(result, **attribs) + return self._simple_new(result, dtype=self.dtype, freq=freq) def __setitem__( self, diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 708b4b074abcc..efa1757a989fc 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -35,6 +35,24 @@ _midnight = time(0, 0) +def tz_to_dtype(tz): + """ + Return a datetime64[ns] dtype appropriate for the given timezone. 
+ + Parameters + ---------- + tz : tzinfo or None + + Returns + ------- + np.dtype or Datetime64TZDType + """ + if tz is None: + return _NS_DTYPE + else: + return DatetimeTZDtype(tz=tz) + + def _to_M8(key, tz=None): """ Timestamp-like => dt64 @@ -305,13 +323,7 @@ def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False): self._freq = freq @classmethod - def _simple_new(cls, values, freq=None, tz=None): - """ - we require the we have a dtype compat for the values - if we are passed a non-dtype compat, then coerce using the constructor - """ - dtype = DatetimeTZDtype(tz=tz) if tz else _NS_DTYPE - + def _simple_new(cls, values, freq=None, dtype=None): return cls(values, freq=freq, dtype=dtype) @classmethod @@ -328,7 +340,8 @@ def _from_sequence(cls, data, dtype=None, copy=False, freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer) - result = cls._simple_new(subarr, freq=freq, tz=tz) + dtype = tz_to_dtype(tz) + result = cls._simple_new(subarr, freq=freq, dtype=dtype) if inferred_freq is None and freq is not None: # this condition precludes `freq_infer` @@ -395,7 +408,7 @@ def _generate_range(cls, start, end, periods, freq, tz=None, end = end.tz_localize(None) # TODO: consider re-implementing _cached_range; GH#17914 values, _tz = generate_regular_range(start, end, periods, freq) - index = cls._simple_new(values, freq=freq, tz=_tz) + index = cls._simple_new(values, freq=freq, dtype=tz_to_dtype(_tz)) if tz is not None and index.tz is None: arr = conversion.tz_localize_to_utc( @@ -418,8 +431,9 @@ def _generate_range(cls, start, end, periods, freq, tz=None, arr = np.linspace( 0, end.value - start.value, periods, dtype='int64') + start.value + dtype = tz_to_dtype(tz) index = cls._simple_new( - arr.astype('M8[ns]', copy=False), freq=None, tz=tz + arr.astype('M8[ns]', copy=False), freq=None, dtype=dtype ) if not left_closed and len(index) and index[0] == start: @@ -427,7 +441,8 @@ def _generate_range(cls, start, end, periods, freq, tz=None, 
if not right_closed and len(index) and index[-1] == end: index = index[:-1] - return cls._simple_new(index.asi8, freq=freq, tz=tz) + dtype = tz_to_dtype(tz) + return cls._simple_new(index.asi8, freq=freq, dtype=dtype) # ----------------------------------------------------------------- # DatetimeLike Interface @@ -806,7 +821,8 @@ def tz_convert(self, tz): 'tz_localize to localize') # No conversion since timestamps are all UTC to begin with - return self._simple_new(self.asi8, tz=tz, freq=self.freq) + dtype = tz_to_dtype(tz) + return self._simple_new(self.asi8, dtype=dtype, freq=self.freq) def tz_localize(self, tz, ambiguous='raise', nonexistent='raise', errors=None): @@ -995,7 +1011,8 @@ def tz_localize(self, tz, ambiguous='raise', nonexistent='raise', self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent, ) new_dates = new_dates.view(_NS_DTYPE) - return self._simple_new(new_dates, tz=tz, freq=self.freq) + dtype = tz_to_dtype(tz) + return self._simple_new(new_dates, dtype=dtype, freq=self.freq) # ---------------------------------------------------------------- # Conversion Methods - Vectorized analogues of Timestamp methods diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 1f2a3636033b4..664ca9c5d2f05 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -22,7 +22,7 @@ from pandas.core.accessor import delegate_names from pandas.core.arrays.datetimes import ( - DatetimeArray, _to_M8, validate_tz_from_dtype) + DatetimeArray, _to_M8, tz_to_dtype, validate_tz_from_dtype) from pandas.core.base import _shared_docs import pandas.core.common as com from pandas.core.indexes.base import Index @@ -326,7 +326,9 @@ def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None): # DatetimeArray._simple_new will accept either i8 or M8[ns] dtypes if isinstance(values, DatetimeIndex): values = values._data - dtarr = DatetimeArray._simple_new(values, freq=freq, tz=tz) + + dtype = tz_to_dtype(tz) + 
dtarr = DatetimeArray._simple_new(values, freq=freq, dtype=dtype) assert isinstance(dtarr, DatetimeArray) result = object.__new__(cls) @@ -401,7 +403,8 @@ def __setstate__(self, state): freq = own_state[1] tz = timezones.tz_standardize(own_state[2]) - dtarr = DatetimeArray._simple_new(data, freq=freq, tz=tz) + dtype = tz_to_dtype(tz) + dtarr = DatetimeArray._simple_new(data, freq=freq, dtype=dtype) self.name = own_state[0] diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 13dca433f9ead..70e4f44cb5de8 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2389,7 +2389,7 @@ def _try_coerce_result(self, result): result = result.reshape(np.prod(result.shape)) # GH#24096 new values invalidates a frequency result = self._holder._simple_new(result, freq=None, - tz=self.values.tz) + dtype=self.values.dtype) return result diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 573f02fe0aa52..73f85d954432e 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -935,7 +935,7 @@ def apply_index(self, i): shifted = liboffsets.shift_months(i.asi8, self.n, self._day_opt) # TODO: going through __new__ raises on call to _validate_frequency; # are we passing incorrect freq? - return type(i)._simple_new(shifted, freq=i.freq, tz=i.tz) + return type(i)._simple_new(shifted, freq=i.freq, dtype=i.dtype) class MonthEnd(MonthOffset): @@ -1642,7 +1642,7 @@ def apply_index(self, dtindex): # TODO: going through __new__ raises on call to _validate_frequency; # are we passing incorrect freq? return type(dtindex)._simple_new(shifted, freq=dtindex.freq, - tz=dtindex.tz) + dtype=dtindex.dtype) class BQuarterEnd(QuarterOffset): @@ -1722,7 +1722,7 @@ def apply_index(self, dtindex): # TODO: going through __new__ raises on call to _validate_frequency; # are we passing incorrect freq? 
return type(dtindex)._simple_new(shifted, freq=dtindex.freq, - tz=dtindex.tz) + dtype=dtindex.dtype) def onOffset(self, dt): if self.normalize and not _is_normalized(dt):
Allows for better code-sharing. I'm pretty sure after this we can get rid of _get_attributes_dict
https://api.github.com/repos/pandas-dev/pandas/pulls/24665
2019-01-07T23:13:21Z
2019-01-08T12:58:49Z
2019-01-08T12:58:49Z
2019-01-08T16:47:03Z
BLD: Include tslibs src
diff --git a/setup.py b/setup.py index 7ba4f5ba399d0..ed2d905f4358b 100755 --- a/setup.py +++ b/setup.py @@ -491,7 +491,7 @@ def srcpath(name=None, suffix='.pyx', subdir='src'): common_include = ['pandas/_libs/src/klib', 'pandas/_libs/src'] -ts_include = ['pandas/_libs/tslibs/src'] +ts_include = ['pandas/_libs/tslibs/src', 'pandas/_libs/tslibs'] lib_depends = ['pandas/_libs/src/parse_helper.h',
Fixes the pip build in pandas-dev/pandas-ci. This worked for me locally in an env setup with https://github.com/pandas-dev/pandas-release. Closes https://github.com/pandas-dev/pandas/issues/22192
https://api.github.com/repos/pandas-dev/pandas/pulls/24664
2019-01-07T22:31:50Z
2019-01-08T02:51:36Z
2019-01-08T02:51:36Z
2019-01-08T03:50:55Z
BUG: fix to_datetime failing to raise on mixed tznaive/tzaware datetimes
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 4bc50695e1ecd..797c3faeb1fd0 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1549,6 +1549,7 @@ Datetimelike - Bug in :class:`PeriodIndex` where comparisons against an array-like object with length 1 failed to raise ``ValueError`` (:issue:`23078`) - Bug in :meth:`DatetimeIndex.astype`, :meth:`PeriodIndex.astype` and :meth:`TimedeltaIndex.astype` ignoring the sign of the ``dtype`` for unsigned integer dtypes (:issue:`24405`). - Fixed bug in :meth:`Series.max` with ``datetime64[ns]``-dtype failing to return ``NaT`` when nulls are present and ``skipna=False`` is passed (:issue:`24265`) +- Bug in :func:`to_datetime` where arrays of ``datetime`` objects containing both timezone-aware and timezone-naive ``datetimes`` would fail to raise ``ValueError`` (:issue:`24569`) Timedelta ^^^^^^^^^ diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 6aa02ca1e5421..6c8b732928bc3 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -167,6 +167,7 @@ def datetime_to_datetime64(values: object[:]): int64_t[:] iresult npy_datetimestruct dts _TSObject _ts + bint found_naive = False result = np.empty(n, dtype='M8[ns]') iresult = result.view('i8') @@ -176,6 +177,9 @@ def datetime_to_datetime64(values: object[:]): iresult[i] = NPY_NAT elif PyDateTime_Check(val): if val.tzinfo is not None: + if found_naive: + raise ValueError('Cannot mix tz-aware with ' + 'tz-naive values') if inferred_tz is not None: if not tz_compare(val.tzinfo, inferred_tz): raise ValueError('Array must be all same time zone') @@ -186,6 +190,7 @@ def datetime_to_datetime64(values: object[:]): iresult[i] = _ts.value check_dts_bounds(&_ts.dts) else: + found_naive = True if inferred_tz is not None: raise ValueError('Cannot mix tz-aware with ' 'tz-naive values') diff --git a/pandas/tests/arrays/test_array.py 
b/pandas/tests/arrays/test_array.py index 1d09a1f65e43f..0698b43b16c05 100644 --- a/pandas/tests/arrays/test_array.py +++ b/pandas/tests/arrays/test_array.py @@ -151,11 +151,7 @@ def test_array_inference(data, expected): [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2000", tz="UTC")], # Mix of tz-aware and tz-naive [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2000")], - # GH-24569 - pytest.param( - np.array([pd.Timestamp('2000'), pd.Timestamp('2000', tz='CET')]), - marks=pytest.mark.xfail(reason="bug in DTA._from_sequence") - ), + np.array([pd.Timestamp('2000'), pd.Timestamp('2000', tz='CET')]), ]) def test_array_inference_fails(data): result = pd.array(data) diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index 8d0e4f5a90557..8228ed7652fea 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -16,6 +16,24 @@ class TestDatetimeArrayConstructor(object): + @pytest.mark.parametrize('meth', [DatetimeArray._from_sequence, + sequence_to_dt64ns, + pd.to_datetime, + pd.DatetimeIndex]) + def test_mixing_naive_tzaware_raises(self, meth): + # GH#24569 + arr = np.array([pd.Timestamp('2000'), pd.Timestamp('2000', tz='CET')]) + + msg = ('Cannot mix tz-aware with tz-naive values|' + 'Tz-aware datetime.datetime cannot be converted ' + 'to datetime64 unless utc=True') + + for obj in [arr, arr[::-1]]: + # check that we raise regardless of whether naive is found + # before aware or vice-versa + with pytest.raises(ValueError, match=msg): + meth(obj) + def test_from_pandas_array(self): arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9 diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py index 97de4cd98dedf..07c42afe44b33 100644 --- a/pandas/tests/indexes/datetimes/test_construction.py +++ b/pandas/tests/indexes/datetimes/test_construction.py @@ -306,16 +306,6 @@ def test_construction_dti_with_mixed_timezones(self): 
tm.assert_index_equal(result, exp, exact=True) assert isinstance(result, DatetimeIndex) - # different tz coerces tz-naive to tz-awareIndex(dtype=object) - result = DatetimeIndex([Timestamp('2011-01-01 10:00'), - Timestamp('2011-01-02 10:00', - tz='US/Eastern')], name='idx') - exp = DatetimeIndex([Timestamp('2011-01-01 05:00'), - Timestamp('2011-01-02 10:00')], - tz='US/Eastern', name='idx') - tm.assert_index_equal(result, exp, exact=True) - assert isinstance(result, DatetimeIndex) - # tz mismatch affecting to tz-aware raises TypeError/ValueError with pytest.raises(ValueError): @@ -323,7 +313,8 @@ def test_construction_dti_with_mixed_timezones(self): Timestamp('2011-01-02 10:00', tz='US/Eastern')], name='idx') - with pytest.raises(TypeError, match='data is already tz-aware'): + msg = 'cannot be converted to datetime64' + with pytest.raises(ValueError, match=msg): DatetimeIndex([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00', tz='US/Eastern')], tz='Asia/Tokyo', name='idx') @@ -333,7 +324,7 @@ def test_construction_dti_with_mixed_timezones(self): Timestamp('2011-01-02 10:00', tz='US/Eastern')], tz='US/Eastern', name='idx') - with pytest.raises(TypeError, match='data is already tz-aware'): + with pytest.raises(ValueError, match=msg): # passing tz should results in DatetimeIndex, then mismatch raises # TypeError Index([pd.NaT, Timestamp('2011-01-01 10:00'),
- [x] closes #24569 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24663
2019-01-07T18:46:53Z
2019-01-09T12:15:10Z
2019-01-09T12:15:09Z
2019-01-09T14:48:37Z
API: Datetime/TimedeltaArray from to_datetime
diff --git a/pandas/core/arrays/array_.py b/pandas/core/arrays/array_.py index 32c08e40b8033..c7be8e3f745c4 100644 --- a/pandas/core/arrays/array_.py +++ b/pandas/core/arrays/array_.py @@ -1,6 +1,7 @@ from pandas._libs import lib, tslibs -from pandas.core.dtypes.common import is_extension_array_dtype +from pandas.core.dtypes.common import ( + is_datetime64_ns_dtype, is_extension_array_dtype, is_timedelta64_ns_dtype) from pandas.core.dtypes.dtypes import registry from pandas import compat @@ -75,9 +76,10 @@ def array(data, # type: Sequence[object] See Also -------- numpy.array : Construct a NumPy array. - arrays.PandasArray : ExtensionArray wrapping a NumPy array. Series : Construct a pandas Series. Index : Construct a pandas Index. + arrays.PandasArray : ExtensionArray wrapping a NumPy array. + Series.array : Extract the array stored within a Series. Notes ----- @@ -120,6 +122,26 @@ def array(data, # type: Sequence[object] ['a', 'b'] Length: 2, dtype: str32 + Finally, Pandas has arrays that mostly overlap with NumPy + + * :class:`arrays.DatetimeArray` + * :class:`arrays.TimedeltaArray` + + When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is + passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray`` + rather than a ``PandasArray``. This is for symmetry with the case of + timezone-aware data, which NumPy does not natively support. + + >>> pd.array(['2015', '2016'], dtype='datetime64[ns]') + <DatetimeArray> + ['2015-01-01 00:00:00', '2016-01-01 00:00:00'] + Length: 2, dtype: datetime64[ns] + + >>> pd.array(["1H", "2H"], dtype='timedelta64[ns]') + <TimedeltaArray> + ['01:00:00', '02:00:00'] + Length: 2, dtype: timedelta64[ns] + Examples -------- If a dtype is not specified, `data` is passed through to @@ -239,5 +261,14 @@ def array(data, # type: Sequence[object] # TODO(BooleanArray): handle this type + # Pandas overrides NumPy for + # 1. datetime64[ns] + # 2. timedelta64[ns] + # so that a DatetimeArray is returned. 
+ if is_datetime64_ns_dtype(dtype): + return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy) + elif is_timedelta64_ns_dtype(dtype): + return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy) + result = PandasArray._from_sequence(data, dtype=dtype, copy=copy) return result diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py index 1d09a1f65e43f..69221c5048307 100644 --- a/pandas/tests/arrays/test_array.py +++ b/pandas/tests/arrays/test_array.py @@ -36,8 +36,36 @@ # Datetime (naive) ([1, 2], np.dtype('datetime64[ns]'), - PandasArray(np.array([1, 2], dtype='datetime64[ns]'))), - # TODO(DatetimeArray): add here + pd.arrays.DatetimeArray._from_sequence( + np.array([1, 2], dtype='datetime64[ns]'))), + + (np.array([1, 2], dtype='datetime64[ns]'), None, + pd.arrays.DatetimeArray._from_sequence( + np.array([1, 2], dtype='datetime64[ns]'))), + + (pd.DatetimeIndex(['2000', '2001']), np.dtype('datetime64[ns]'), + pd.arrays.DatetimeArray._from_sequence(['2000', '2001'])), + + (pd.DatetimeIndex(['2000', '2001']), None, + pd.arrays.DatetimeArray._from_sequence(['2000', '2001'])), + + (['2000', '2001'], np.dtype('datetime64[ns]'), + pd.arrays.DatetimeArray._from_sequence(['2000', '2001'])), + + # Datetime (tz-aware) + (['2000', '2001'], pd.DatetimeTZDtype(tz="CET"), + pd.arrays.DatetimeArray._from_sequence( + ['2000', '2001'], dtype=pd.DatetimeTZDtype(tz="CET"))), + + # Timedelta + (['1H', '2H'], np.dtype('timedelta64[ns]'), + pd.arrays.TimedeltaArray._from_sequence(['1H', '2H'])), + + (pd.TimedeltaIndex(['1H', '2H']), np.dtype('timedelta64[ns]'), + pd.arrays.TimedeltaArray._from_sequence(['1H', '2H'])), + + (pd.TimedeltaIndex(['1H', '2H']), None, + pd.arrays.TimedeltaArray._from_sequence(['1H', '2H'])), # Category (['a', 'b'], 'category', pd.Categorical(['a', 'b'])), diff --git a/pandas/tests/series/test_internals.py b/pandas/tests/series/test_internals.py index 31cbea8f95090..772617c494aef 100644 --- 
a/pandas/tests/series/test_internals.py +++ b/pandas/tests/series/test_internals.py @@ -313,6 +313,20 @@ def test_constructor_no_pandas_array(self): tm.assert_series_equal(ser, result) assert isinstance(result._data.blocks[0], IntBlock) + def test_from_array(self): + result = pd.Series(pd.array(['1H', '2H'], dtype='timedelta64[ns]')) + assert result._data.blocks[0].is_extension is False + + result = pd.Series(pd.array(['2015'], dtype='datetime64[ns]')) + assert result._data.blocks[0].is_extension is False + + def test_from_list_dtype(self): + result = pd.Series(['1H', '2H'], dtype='timedelta64[ns]') + assert result._data.blocks[0].is_extension is False + + result = pd.Series(['2015'], dtype='datetime64[ns]') + assert result._data.blocks[0].is_extension is False + def test_hasnans_unchached_for_series(): # GH#19700
Closes https://github.com/pandas-dev/pandas/issues/24656
https://api.github.com/repos/pandas-dev/pandas/pulls/24660
2019-01-07T13:09:54Z
2019-01-08T19:40:52Z
2019-01-08T19:40:52Z
2019-01-08T19:40:57Z
DOC: small doc fixup of missing pd.
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 7c06288c01221..02cbc7e2c3b6d 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -76,7 +76,7 @@ The exact details of what an :class:`~pandas.api.extensions.ExtensionArray` is a beyond the scope of this introduction. See :ref:`basics.dtypes` for more. If you know you need a NumPy array, use :meth:`~Series.to_numpy` -or :meth:`numpy.ndarray.asarray`. +or :meth:`numpy.asarray`. .. ipython:: python diff --git a/doc/source/whatsnew/v0.17.0.rst b/doc/source/whatsnew/v0.17.0.rst index 6bde4f1b9cf99..c53fee42548e9 100644 --- a/doc/source/whatsnew/v0.17.0.rst +++ b/doc/source/whatsnew/v0.17.0.rst @@ -984,9 +984,9 @@ Removal of prior version deprecations/changes .. ipython:: python np.random.seed(1234) - df = DataFrame(np.random.randn(5, 2), - columns=list('AB'), - index=date_range('20130101', periods=5)) + df = pd.DataFrame(np.random.randn(5, 2), + columns=list('AB'), + index=pd.date_range('2013-01-01', periods=5)) df Previously
There was a failing example again in the IPython directive. Follow-up on https://github.com/pandas-dev/pandas/pull/24649 and https://github.com/pandas-dev/pandas/pull/24650
https://api.github.com/repos/pandas-dev/pandas/pulls/24659
2019-01-07T13:03:49Z
2019-01-07T14:27:58Z
2019-01-07T14:27:58Z
2019-01-07T14:35:54Z
XLSB support in read_excel() - #8540
diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 0000000000000..e947f30d285cd --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,147 @@ +version: 2 +jobs: + + # -------------------------------------------------------------------------- + # 0. py27_compat + # -------------------------------------------------------------------------- + py27_compat: + docker: + - image: continuumio/miniconda:latest + # databases configuration + - image: circleci/postgres:9.6.5-alpine-ram + environment: + POSTGRES_USER: postgres + POSTGRES_DB: pandas_nosetest + - image: circleci/mysql:8-ram + environment: + MYSQL_USER: "root" + MYSQL_HOST: "localhost" + MYSQL_ALLOW_EMPTY_PASSWORD: "true" + MYSQL_DATABASE: "pandas_nosetest" + environment: + JOB: "2.7_COMPAT" + ENV_FILE: "ci/circle-27-compat.yaml" + LOCALE_OVERRIDE: "it_IT.UTF-8" + MINICONDA_DIR: /home/ubuntu/miniconda3 + steps: + - checkout + - run: + name: build + command: | + ./ci/install_circle.sh + ./ci/show_circle.sh + - run: + name: test + command: ./ci/run_circle.sh --skip-slow --skip-network + + # -------------------------------------------------------------------------- + # 1. 
py36_locale + # -------------------------------------------------------------------------- + py36_locale: + docker: + - image: continuumio/miniconda:latest + # databases configuration + - image: circleci/postgres:9.6.5-alpine-ram + environment: + POSTGRES_USER: postgres + POSTGRES_DB: pandas_nosetest + - image: circleci/mysql:8-ram + environment: + MYSQL_USER: "root" + MYSQL_HOST: "localhost" + MYSQL_ALLOW_EMPTY_PASSWORD: "true" + MYSQL_DATABASE: "pandas_nosetest" + + environment: + JOB: "3.6_LOCALE" + ENV_FILE: "ci/circle-36-locale.yaml" + LOCALE_OVERRIDE: "zh_CN.UTF-8" + MINICONDA_DIR: /home/ubuntu/miniconda3 + steps: + - checkout + - run: + name: build + command: | + ./ci/install_circle.sh + ./ci/show_circle.sh + - run: + name: test + command: ./ci/run_circle.sh --skip-slow --skip-network + + # -------------------------------------------------------------------------- + # 2. py36_locale_slow + # -------------------------------------------------------------------------- + py36_locale_slow: + docker: + - image: continuumio/miniconda:latest + # databases configuration + - image: circleci/postgres:9.6.5-alpine-ram + environment: + POSTGRES_USER: postgres + POSTGRES_DB: pandas_nosetest + - image: circleci/mysql:8-ram + environment: + MYSQL_USER: "root" + MYSQL_HOST: "localhost" + MYSQL_ALLOW_EMPTY_PASSWORD: "true" + MYSQL_DATABASE: "pandas_nosetest" + + environment: + JOB: "3.6_LOCALE_SLOW" + ENV_FILE: "ci/circle-36-locale_slow.yaml" + LOCALE_OVERRIDE: "zh_CN.UTF-8" + MINICONDA_DIR: /home/ubuntu/miniconda3 + steps: + - checkout + - run: + name: build + command: | + ./ci/install_circle.sh + ./ci/show_circle.sh + - run: + name: test + command: ./ci/run_circle.sh --only-slow --skip-network + + # -------------------------------------------------------------------------- + # 3. 
py35_ascii + # -------------------------------------------------------------------------- + py35_ascii: + docker: + - image: continuumio/miniconda:latest + # databases configuration + - image: circleci/postgres:9.6.5-alpine-ram + environment: + POSTGRES_USER: postgres + POSTGRES_DB: pandas_nosetest + - image: circleci/mysql:8-ram + environment: + MYSQL_USER: "root" + MYSQL_HOST: "localhost" + MYSQL_ALLOW_EMPTY_PASSWORD: "true" + MYSQL_DATABASE: "pandas_nosetest" + + environment: + JOB: "3.5_ASCII" + ENV_FILE: "ci/circle-35-ascii.yaml" + LOCALE_OVERRIDE: "C" + MINICONDA_DIR: /home/ubuntu/miniconda3 + steps: + - checkout + - run: + name: build + command: | + ./ci/install_circle.sh + ./ci/show_circle.sh + - run: + name: test + command: ./ci/run_circle.sh --skip-slow --skip-network + + +workflows: + version: 2 + build_and_test: + jobs: + - py27_compat + - py36_locale + - py36_locale_slow + - py35_ascii diff --git a/.travis.yml b/.travis.yml index 4e25380a7d941..2d2a0bc019c80 100644 --- a/.travis.yml +++ b/.travis.yml @@ -35,6 +35,11 @@ matrix: language: generic env: - JOB="3.5, OSX" ENV_FILE="ci/travis-35-osx.yaml" TEST_ARGS="--skip-slow --skip-network" + + - dist: trusty + env: + - JOB="3.7" ENV_FILE="ci/travis-37.yaml" TEST_ARGS="--skip-slow --skip-network" + - dist: trusty env: - JOB="2.7, locale, slow, old NumPy" ENV_FILE="ci/travis-27-locale.yaml" LOCALE_OVERRIDE="zh_CN.UTF-8" SLOW=true diff --git a/MANIFEST.in b/MANIFEST.in index 9773019c6e6e0..b417b8890fa24 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,27 +3,39 @@ include LICENSE include RELEASE.md include README.md include setup.py -include pyproject.toml graft doc prune doc/build +graft LICENSES + graft pandas -global-exclude *.so -global-exclude *.pyd +global-exclude *.bz2 +global-exclude *.csv +global-exclude *.dta +global-exclude *.gz +global-exclude *.h5 +global-exclude *.html +global-exclude *.json +global-exclude *.msgpack +global-exclude *.pickle +global-exclude *.png global-exclude *.pyc 
+global-exclude *.pyd +global-exclude *.sas7bdat +global-exclude *.so +global-exclude *.xls +global-exclude *.xlsm +global-exclude *.xlsx +global-exclude *.xpt +global-exclude *.xz +global-exclude *.zip global-exclude *~ -global-exclude \#* -global-exclude .git* global-exclude .DS_Store -global-exclude *.png +global-exclude .git* +global-exclude \#* -# include examples/data/* -# recursive-include examples *.py -# recursive-include doc/source * -# recursive-include doc/sphinxext * -# recursive-include LICENSES * include versioneer.py include pandas/_version.py include pandas/io/formats/templates/*.tpl diff --git a/appveyor.yml b/appveyor.yml index f70fc829ec971..c6199c1493f22 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -20,12 +20,14 @@ environment: matrix: - CONDA_ROOT: "C:\\Miniconda3_64" + APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 PYTHON_VERSION: "3.6" PYTHON_ARCH: "64" CONDA_PY: "36" CONDA_NPY: "113" - CONDA_ROOT: "C:\\Miniconda3_64" + APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 PYTHON_VERSION: "2.7" PYTHON_ARCH: "64" CONDA_PY: "27" diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index ae1d7029217a4..5464e7cba22c3 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -173,3 +173,23 @@ def setup(self, dtype): def time_isin_categorical(self, dtype): self.series.isin(self.sample) + + +class IsMonotonic(object): + + def setup(self): + N = 1000 + self.c = pd.CategoricalIndex(list('a' * N + 'b' * N + 'c' * N)) + self.s = pd.Series(self.c) + + def time_categorical_index_is_monotonic_increasing(self): + self.c.is_monotonic_increasing + + def time_categorical_index_is_monotonic_decreasing(self): + self.c.is_monotonic_decreasing + + def time_categorical_series_is_monotonic_increasing(self): + self.s.is_monotonic_increasing + + def time_categorical_series_is_monotonic_decreasing(self): + self.s.is_monotonic_decreasing diff --git a/ci/appveyor-27.yaml b/ci/appveyor-27.yaml index 
84107c605b14f..e47ebf75344fa 100644 --- a/ci/appveyor-27.yaml +++ b/ci/appveyor-27.yaml @@ -12,7 +12,7 @@ dependencies: - matplotlib - numexpr - numpy=1.10* - - openpyxl + - openpyxl=2.5.5 - pytables==3.2.2 - python=2.7.* - pytz diff --git a/ci/appveyor-36.yaml b/ci/appveyor-36.yaml index 5e370de39958a..d007f04ca0720 100644 --- a/ci/appveyor-36.yaml +++ b/ci/appveyor-36.yaml @@ -10,7 +10,7 @@ dependencies: - matplotlib - numexpr - numpy=1.13* - - openpyxl + - openpyxl=2.5.5 - pyarrow - pytables - python-dateutil diff --git a/ci/circle-27-compat.yaml b/ci/circle-27-compat.yaml index 81a48d4edf11c..e037877819b14 100644 --- a/ci/circle-27-compat.yaml +++ b/ci/circle-27-compat.yaml @@ -4,11 +4,11 @@ channels: - conda-forge dependencies: - bottleneck=1.0.0 - - cython=0.24 + - cython=0.28.2 - jinja2=2.8 - numexpr=2.4.4 # we test that we correctly don't use an unsupported numexpr - - numpy=1.9.2 - - openpyxl + - numpy=1.9.3 + - openpyxl=2.5.5 - psycopg2 - pytables=3.2.2 - python-dateutil=2.5.0 diff --git a/ci/circle-35-ascii.yaml b/ci/circle-35-ascii.yaml index 602c414b49bb2..745678791458d 100644 --- a/ci/circle-35-ascii.yaml +++ b/ci/circle-35-ascii.yaml @@ -2,7 +2,7 @@ name: pandas channels: - defaults dependencies: - - cython + - cython>=0.28.2 - nomkl - numpy - python-dateutil diff --git a/ci/circle-36-locale.yaml b/ci/circle-36-locale.yaml index cc852c1e2aeeb..a85e0b58f5e33 100644 --- a/ci/circle-36-locale.yaml +++ b/ci/circle-36-locale.yaml @@ -13,7 +13,7 @@ dependencies: - nomkl - numexpr - numpy - - openpyxl + - openpyxl=2.5.5 - psycopg2 - pymysql - pytables diff --git a/ci/circle-36-locale_slow.yaml b/ci/circle-36-locale_slow.yaml index cc852c1e2aeeb..a85e0b58f5e33 100644 --- a/ci/circle-36-locale_slow.yaml +++ b/ci/circle-36-locale_slow.yaml @@ -13,7 +13,7 @@ dependencies: - nomkl - numexpr - numpy - - openpyxl + - openpyxl=2.5.5 - psycopg2 - pymysql - pytables diff --git a/ci/install_circle.sh b/ci/install_circle.sh index 5ffff84c88488..f8bcf6bcffc99 100755 --- 
a/ci/install_circle.sh +++ b/ci/install_circle.sh @@ -6,14 +6,7 @@ echo "[home_dir: $home_dir]" echo "[ls -ltr]" ls -ltr -echo "[Using clean Miniconda install]" -rm -rf "$MINICONDA_DIR" - -# install miniconda -wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -q -O miniconda.sh || exit 1 -bash miniconda.sh -b -p "$MINICONDA_DIR" || exit 1 - -export PATH="$MINICONDA_DIR/bin:$PATH" +apt-get update -y && apt-get install -y build-essential postgresql-client-9.6 echo "[update conda]" conda config --set ssl_verify false || exit 1 @@ -48,9 +41,17 @@ source $ENVS_FILE # edit the locale override if needed if [ -n "$LOCALE_OVERRIDE" ]; then + + apt-get update && apt-get -y install locales locales-all + + export LANG=$LOCALE_OVERRIDE + export LC_ALL=$LOCALE_OVERRIDE + + python -c "import locale; locale.setlocale(locale.LC_ALL, \"$LOCALE_OVERRIDE\")" || exit 1; + echo "[Adding locale to the first line of pandas/__init__.py]" rm -f pandas/__init__.pyc - sedc="3iimport locale\nlocale.setlocale(locale.LC_ALL, '$LOCALE_OVERRIDE')\n" + sedc="3iimport locale\nlocale.setlocale(locale.LC_ALL, \"$LOCALE_OVERRIDE\")\n" sed -i "$sedc" pandas/__init__.py echo "[head -4 pandas/__init__.py]" head -4 pandas/__init__.py diff --git a/ci/install_db_circle.sh b/ci/install_db_circle.sh deleted file mode 100755 index a00f74f009f54..0000000000000 --- a/ci/install_db_circle.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -echo "installing dbs" -mysql -e 'create database pandas_nosetest;' -psql -c 'create database pandas_nosetest;' -U postgres - -echo "done" -exit 0 diff --git a/ci/requirements-optional-conda.txt b/ci/requirements-optional-conda.txt index e8cfcdf80f2e8..ca60c772392e7 100644 --- a/ci/requirements-optional-conda.txt +++ b/ci/requirements-optional-conda.txt @@ -11,7 +11,7 @@ lxml matplotlib nbsphinx numexpr -openpyxl +openpyxl=2.5.5 pyarrow pymysql pytables diff --git a/ci/requirements-optional-pip.txt b/ci/requirements-optional-pip.txt index 
877c52fa0b4fd..a6009c270c2a6 100644 --- a/ci/requirements-optional-pip.txt +++ b/ci/requirements-optional-pip.txt @@ -13,7 +13,7 @@ lxml matplotlib nbsphinx numexpr -openpyxl +openpyxl=2.5.5 pyarrow pymysql tables @@ -26,4 +26,4 @@ sqlalchemy xarray xlrd xlsxwriter -xlwt \ No newline at end of file +xlwt diff --git a/ci/run_circle.sh b/ci/run_circle.sh index 435985bd42148..fc2a8b849a354 100755 --- a/ci/run_circle.sh +++ b/ci/run_circle.sh @@ -6,4 +6,4 @@ export PATH="$MINICONDA_DIR/bin:$PATH" source activate pandas echo "pytest --strict --junitxml=$CIRCLE_TEST_REPORTS/reports/junit.xml $@ pandas" -pytest --strict --junitxml=$CIRCLE_TEST_REPORTS/reports/junit.xml $@ pandas +pytest --strict --color=no --junitxml=$CIRCLE_TEST_REPORTS/reports/junit.xml $@ pandas diff --git a/ci/script_single.sh b/ci/script_single.sh index f376c920ac71b..60e2fbb33ee5d 100755 --- a/ci/script_single.sh +++ b/ci/script_single.sh @@ -25,12 +25,12 @@ if [ "$DOC" ]; then echo "We are not running pytest as this is a doc-build" elif [ "$COVERAGE" ]; then - echo pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas - pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas + echo pytest -s -m "single" -r xXs --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas + pytest -s -m "single" -r xXs --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas else - echo pytest -m "single" -r xX --junitxml=/tmp/single.xml --strict $TEST_ARGS pandas - pytest -m "single" -r xX --junitxml=/tmp/single.xml --strict $TEST_ARGS pandas # TODO: doctest + echo pytest -m "single" -r xXs --junitxml=/tmp/single.xml --strict $TEST_ARGS pandas + pytest -m "single" -r xXs --junitxml=/tmp/single.xml --strict $TEST_ARGS pandas # TODO: doctest fi diff --git a/ci/travis-27-locale.yaml 
b/ci/travis-27-locale.yaml index 1312c1296d46a..eacae4630edeb 100644 --- a/ci/travis-27-locale.yaml +++ b/ci/travis-27-locale.yaml @@ -7,7 +7,7 @@ dependencies: - cython=0.24 - lxml - matplotlib=1.4.3 - - numpy=1.9.2 + - numpy=1.9.3 - openpyxl=2.4.0 - python-dateutil - python-blosc diff --git a/ci/travis-27.yaml b/ci/travis-27.yaml index 22b993a2da886..26a520a16a4cc 100644 --- a/ci/travis-27.yaml +++ b/ci/travis-27.yaml @@ -27,6 +27,7 @@ dependencies: - PyCrypto - pymysql=0.6.3 - pytables + - blosc=1.14.3 - python-blosc - python-dateutil=2.5.0 - python=2.7* diff --git a/ci/travis-35-osx.yaml b/ci/travis-35-osx.yaml index e74abac4c9775..5722d91781999 100644 --- a/ci/travis-35-osx.yaml +++ b/ci/travis-35-osx.yaml @@ -12,7 +12,7 @@ dependencies: - nomkl - numexpr - numpy=1.10.4 - - openpyxl + - openpyxl=2.5.5 - pytables - python=3.5* - pytz diff --git a/ci/travis-36-doc.yaml b/ci/travis-36-doc.yaml index c22dddbe0ba3f..05ff26020ac7d 100644 --- a/ci/travis-36-doc.yaml +++ b/ci/travis-36-doc.yaml @@ -21,7 +21,7 @@ dependencies: - notebook - numexpr - numpy=1.13* - - openpyxl + - openpyxl=2.5.5 - pandoc - pyqt - pytables @@ -36,6 +36,7 @@ dependencies: - sphinx - sqlalchemy - statsmodels + - tzlocal - xarray - xlrd - xlsxwriter diff --git a/ci/travis-36-slow.yaml b/ci/travis-36-slow.yaml index 6c475dc48723c..ae6353216cc2d 100644 --- a/ci/travis-36-slow.yaml +++ b/ci/travis-36-slow.yaml @@ -10,7 +10,7 @@ dependencies: - matplotlib - numexpr - numpy - - openpyxl + - openpyxl=2.5.5 - patsy - psycopg2 - pymysql diff --git a/ci/travis-36.yaml b/ci/travis-36.yaml index fe057e714761e..83f963b9d9b6d 100644 --- a/ci/travis-36.yaml +++ b/ci/travis-36.yaml @@ -17,13 +17,11 @@ dependencies: - nomkl - numexpr - numpy - - openpyxl - - pandas-datareader + - openpyxl=2.5.5 - psycopg2 - pyarrow - pymysql - pytables - - python-dateutil - python-snappy - python=3.6* - pytz @@ -45,3 +43,5 @@ dependencies: - pip: - brotlipy - coverage + - pandas-datareader + - python-dateutil diff --git 
a/ci/travis-37.yaml b/ci/travis-37.yaml new file mode 100644 index 0000000000000..8b255c9e6ec72 --- /dev/null +++ b/ci/travis-37.yaml @@ -0,0 +1,14 @@ +name: pandas +channels: + - defaults + - conda-forge + - c3i_test +dependencies: + - python=3.7 + - cython + - numpy + - python-dateutil + - nomkl + - pytz + - pytest + - pytest-xdist diff --git a/circle.yml b/circle.yml deleted file mode 100644 index 66415defba6fe..0000000000000 --- a/circle.yml +++ /dev/null @@ -1,38 +0,0 @@ -machine: - environment: - # these are globally set - MINICONDA_DIR: /home/ubuntu/miniconda3 - - -database: - override: - - ./ci/install_db_circle.sh - - -checkout: - post: - # since circleci does a shallow fetch - # we need to populate our tags - - git fetch --depth=1000 - - -dependencies: - override: - - > - case $CIRCLE_NODE_INDEX in - 0) - sudo apt-get install language-pack-it && ./ci/install_circle.sh JOB="2.7_COMPAT" ENV_FILE="ci/circle-27-compat.yaml" LOCALE_OVERRIDE="it_IT.UTF-8" ;; - 1) - sudo apt-get install language-pack-zh-hans && ./ci/install_circle.sh JOB="3.6_LOCALE" ENV_FILE="ci/circle-36-locale.yaml" LOCALE_OVERRIDE="zh_CN.UTF-8" ;; - 2) - sudo apt-get install language-pack-zh-hans && ./ci/install_circle.sh JOB="3.6_LOCALE_SLOW" ENV_FILE="ci/circle-36-locale_slow.yaml" LOCALE_OVERRIDE="zh_CN.UTF-8" ;; - 3) - ./ci/install_circle.sh JOB="3.5_ASCII" ENV_FILE="ci/circle-35-ascii.yaml" LOCALE_OVERRIDE="C" ;; - esac - - ./ci/show_circle.sh - - -test: - override: - - case $CIRCLE_NODE_INDEX in 0) ./ci/run_circle.sh --skip-slow --skip-network ;; 1) ./ci/run_circle.sh --only-slow --skip-network ;; 2) ./ci/run_circle.sh --skip-slow --skip-network ;; 3) ./ci/run_circle.sh --skip-slow --skip-network ;; esac: - parallel: true diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index c81842d3d9212..ec517d3e07bdf 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -924,6 +924,55 @@ bins, with ``NaN`` representing a missing value similar to other dtypes. 
pd.cut([0, 3, 5, 1], bins=c.categories) + +Generating Ranges of Intervals +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If we need intervals on a regular frequency, we can use the :func:`interval_range` function +to create an ``IntervalIndex`` using various combinations of ``start``, ``end``, and ``periods``. +The default frequency for ``interval_range`` is 1 for numeric intervals, and calendar day for +datetime-like intervals: + +.. ipython:: python + + pd.interval_range(start=0, end=5) + + pd.interval_range(start=pd.Timestamp('2017-01-01'), periods=4) + + pd.interval_range(end=pd.Timedelta('3 days'), periods=3) + +The ``freq`` parameter can be used to specify non-default frequencies, and can utilize a variety +of :ref:`frequency aliases <timeseries.offset_aliases>` with datetime-like intervals: + +.. ipython:: python + + pd.interval_range(start=0, periods=5, freq=1.5) + + pd.interval_range(start=pd.Timestamp('2017-01-01'), periods=4, freq='W') + + pd.interval_range(start=pd.Timedelta('0 days'), periods=3, freq='9H') + +Additionally, the ``closed`` parameter can be used to specify which side(s) the intervals +are closed on. Intervals are closed on the right side by default. + +.. ipython:: python + + pd.interval_range(start=0, end=4, closed='both') + + pd.interval_range(start=0, end=4, closed='neither') + +.. versionadded:: 0.23.0 + +Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced +intervals from ``start`` to ``end`` inclusively, with ``periods`` number of elements +in the resulting ``IntervalIndex``: + +.. 
ipython:: python + + pd.interval_range(start=0, end=6, periods=4) + + pd.interval_range(pd.Timestamp('2018-01-01'), pd.Timestamp('2018-02-28'), periods=3) + Miscellaneous indexing FAQ -------------------------- diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index 30cdb06b28487..6714398084186 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -38,7 +38,10 @@ Statsmodels leverages pandas objects as the underlying data container for comput Use pandas DataFrames in your `scikit-learn <http://scikit-learn.org/>`__ ML pipeline. +`Featuretools <https://github.com/featuretools/featuretools/>`__ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Featuretools is a Python library for automated feature engineering built on top of pandas. It excels at transforming temporal and relational datasets into feature matrices for machine learning using reusable feature engineering "primitives". Users can contribute their own primitives in Python and share them with the rest of the community. .. _ecosystem.visualization: diff --git a/doc/source/install.rst b/doc/source/install.rst index 6054be112f52c..846170f9f0fa5 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -43,7 +43,7 @@ For more information, see the `Python 3 statement`_ and the `Porting to Python 3 Python version support ---------------------- -Officially Python 2.7, 3.5, and 3.6. +Officially Python 2.7, 3.5, 3.6, and 3.7. Installing pandas ----------------- diff --git a/doc/source/io.rst b/doc/source/io.rst index aa2484b0cb5c3..d818f486ad62d 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -4719,14 +4719,6 @@ writes ``data`` to the database in batches of 1000 rows at a time: data.to_sql('data_chunked', engine, chunksize=1000) -.. note:: - - The function :func:`~pandas.DataFrame.to_sql` will perform a multivalue - insert if the engine dialect ``supports_multivalues_insert``. This will - greatly speed up the insert in some cases. 
- -SQL data types -++++++++++++++ :func:`~pandas.DataFrame.to_sql` will try to map your data to an appropriate SQL data type based on the dtype of the data. When you have columns of dtype diff --git a/doc/source/merging.rst b/doc/source/merging.rst index 1161656731f88..4d7cd0bdadef7 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -279,7 +279,7 @@ need to be: Ignoring indexes on the concatenation axis ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -For ``DataFrame``s which don't have a meaningful index, you may wish to append +For ``DataFrame`` s which don't have a meaningful index, you may wish to append them and ignore the fact that they may have overlapping indexes. To do this, use the ``ignore_index`` argument: @@ -314,7 +314,7 @@ This is also a valid argument to :meth:`DataFrame.append`: Concatenating with mixed ndims ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -You can concatenate a mix of ``Series`` and ``DataFrame``s. The +You can concatenate a mix of ``Series`` and ``DataFrame`` s. The ``Series`` will be transformed to ``DataFrame`` with the column name as the name of the ``Series``. diff --git a/doc/source/release.rst b/doc/source/release.rst index 32db2ff5ebb24..08200d4d276cc 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -37,10 +37,91 @@ analysis / manipulation tool available in any language. * Binary installers on PyPI: https://pypi.org/project/pandas * Documentation: http://pandas.pydata.org +pandas 0.23.2 +------------- + +**Release date**: July 5, 2018 + +This is a minor bug-fix release in the 0.23.x series and includes some small regression fixes +and bug fixes. + +See the :ref:`full whatsnew <whatsnew_0232>` for a list of all the changes. + +Thanks +~~~~~~ + +A total of 17 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* David Krych +* Jacopo Rota + +* Jeff Reback +* Jeremy Schendel +* Joris Van den Bossche +* Kalyan Gokhale +* Matthew Roeschke +* Michael Odintsov + +* Ming Li +* Pietro Battiston +* Tom Augspurger +* Uddeshya Singh +* Vu Le + +* alimcmaster1 + +* david-liu-brattle-1 + +* gfyoung +* jbrockmendel + +pandas 0.23.1 +------------- + +**Release date**: June 12, 2018 + +This is a minor release from 0.23.0 and includes a number of bug fixes and +performance improvements. + +See the :ref:`full whatsnew <whatsnew_0231>` for a list of all the changes. + +Thanks +~~~~~~ + +A total of 30 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Adam J. Stewart +* Adam Kim + +* Aly Sivji +* Chalmer Lowe + +* Damini Satya + +* Dr. Irv +* Gabe Fernando + +* Giftlin Rajaiah +* Jeff Reback +* Jeremy Schendel + +* Joris Van den Bossche +* Kalyan Gokhale + +* Kevin Sheppard +* Matthew Roeschke +* Max Kanter + +* Ming Li +* Pyry Kovanen + +* Stefano Cianciulli +* Tom Augspurger +* Uddeshya Singh + +* Wenhuan +* William Ayd +* chris-b1 +* gfyoung +* h-vetinari +* nprad + +* ssikdar1 + +* tmnhat2001 +* topper-123 +* zertrin + + pandas 0.23.0 ------------- -**Release date**: May 15, 2017 +**Release date**: May 15, 2018 This is a major release from 0.22.0 and includes a number of API changes, new features, enhancements, and performance improvements along with a large number diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst index 5f3a01f0725d4..745810704f665 100644 --- a/doc/source/timedeltas.rst +++ b/doc/source/timedeltas.rst @@ -352,8 +352,8 @@ You can convert a ``Timedelta`` to an `ISO 8601 Duration`_ string with the TimedeltaIndex -------------- -To generate an index with time delta, you can use either the ``TimedeltaIndex`` or -the ``timedelta_range`` constructor. +To generate an index with time delta, you can use either the :class:`TimedeltaIndex` or +the :func:`timedelta_range` constructor. 
Using ``TimedeltaIndex`` you can pass string-like, ``Timedelta``, ``timedelta``, +or ``np.timedelta64`` objects. Passing ``np.nan/pd.NaT/nat`` will represent missing values. @@ -363,13 +363,47 @@ or ``np.timedelta64`` objects. Passing ``np.nan/pd.NaT/nat`` will represent miss pd.TimedeltaIndex(['1 days', '1 days, 00:00:05', np.timedelta64(2,'D'), datetime.timedelta(days=2,seconds=2)]) -Similarly to ``date_range``, you can construct regular ranges of a ``TimedeltaIndex``: +Generating Ranges of Time Deltas +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Similar to :func:`date_range`, you can construct regular ranges of a ``TimedeltaIndex`` +using :func:`timedelta_range`. The default frequency for ``timedelta_range`` is +calendar day: + +.. ipython:: python + + pd.timedelta_range(start='1 days', periods=5) + +Various combinations of ``start``, ``end``, and ``periods`` can be used with +``timedelta_range``: + +.. ipython:: python + + pd.timedelta_range(start='1 days', end='5 days') + + pd.timedelta_range(end='10 days', periods=4) + +The ``freq`` parameter can be passed a variety of :ref:`frequency aliases <timeseries.offset_aliases>`: .. ipython:: python - pd.timedelta_range(start='1 days', periods=5, freq='D') pd.timedelta_range(start='1 days', end='2 days', freq='30T') + pd.timedelta_range(start='1 days', periods=5, freq='2D5H') + + +.. versionadded:: 0.23.0 + +Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced +timedeltas from ``start`` to ``end`` inclusively, with ``periods`` number of elements +in the resulting ``TimedeltaIndex``: + +.. 
ipython:: python + + pd.timedelta_range('0 days', '4 days', periods=5) + + pd.timedelta_range('0 days', '4 days', periods=10) + Using the TimedeltaIndex ~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 73e3e721aad71..1b0cf86995a39 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -393,6 +393,18 @@ of those specified will not be generated: pd.bdate_range(start=start, periods=20) +.. versionadded:: 0.23.0 + +Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced +dates from ``start`` to ``end`` inclusively, with ``periods`` number of elements in the +resulting ``DatetimeIndex``: + +.. ipython:: python + + pd.date_range('2018-01-01', '2018-01-05', periods=5) + + pd.date_range('2018-01-01', '2018-01-05', periods=10) + .. _timeseries.custom-freq-ranges: Custom Frequency Ranges diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst index d61a98fe2dae4..afd274332b3df 100644 --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -18,6 +18,12 @@ What's New These are new features and improvements of note in each release. +.. include:: whatsnew/v0.23.3.txt + +.. include:: whatsnew/v0.23.2.txt + +.. include:: whatsnew/v0.23.1.txt + .. include:: whatsnew/v0.23.0.txt .. include:: whatsnew/v0.22.0.txt diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 3f89de1dc22d8..feba9d856789b 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1,6 +1,6 @@ .. _whatsnew_0230: -v0.23.0 (May 15, 2017) +v0.23.0 (May 15, 2018) ---------------------- This is a major release from 0.22.0 and includes a number of API changes, diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt new file mode 100644 index 0000000000000..9f8635743ea6a --- /dev/null +++ b/doc/source/whatsnew/v0.23.1.txt @@ -0,0 +1,140 @@ +.. 
_whatsnew_0231: + +v0.23.1 +------- + +This is a minor bug-fix release in the 0.23.x series and includes some small regression fixes +and bug fixes. We recommend that all users upgrade to this version. + +.. warning:: + + Starting January 1, 2019, pandas feature releases will support Python 3 only. + See :ref:`install.dropping-27` for more. + +.. contents:: What's new in v0.23.1 + :local: + :backlinks: none + +.. _whatsnew_0231.fixed_regressions: + +Fixed Regressions +~~~~~~~~~~~~~~~~~ + +**Comparing Series with datetime.date** + +We've reverted a 0.23.0 change to comparing a :class:`Series` holding datetimes and a ``datetime.date`` object (:issue:`21152`). +In pandas 0.22 and earlier, comparing a Series holding datetimes and ``datetime.date`` objects would coerce the ``datetime.date`` to a datetime before comparing. +This was inconsistent with Python, NumPy, and :class:`DatetimeIndex`, which never consider a datetime and ``datetime.date`` equal. + +In 0.23.0, we unified operations between DatetimeIndex and Series, and in the process changed comparisons between a Series of datetimes and ``datetime.date`` without warning. + +We've temporarily restored the 0.22.0 behavior, so datetimes and dates may again compare equal, but will restore the 0.23.0 behavior in a future release. + +To summarize, here's the behavior in 0.22.0, 0.23.0, 0.23.1: + +.. code-block:: python + + # 0.22.0... Silently coerce the datetime.date + >>> Series(pd.date_range('2017', periods=2)) == datetime.date(2017, 1, 1) + 0 True + 1 False + dtype: bool + + # 0.23.0... Do not coerce the datetime.date + >>> Series(pd.date_range('2017', periods=2)) == datetime.date(2017, 1, 1) + 0 False + 1 False + dtype: bool + + # 0.23.1... Coerce the datetime.date with a warning + >>> Series(pd.date_range('2017', periods=2)) == datetime.date(2017, 1, 1) + /bin/python:1: FutureWarning: Comparing Series of datetimes with 'datetime.date'. Currently, the + 'datetime.date' is coerced to a datetime. 
In the future pandas will + not coerce, and the values not compare equal to the 'datetime.date'. + To retain the current behavior, convert the 'datetime.date' to a + datetime with 'pd.Timestamp'. + #!/bin/python3 + 0 True + 1 False + dtype: bool + +In addition, ordering comparisons will raise a ``TypeError`` in the future. + +**Other Fixes** + +- Reverted the ability of :func:`~DataFrame.to_sql` to perform multivalue + inserts as this caused regression in certain cases (:issue:`21103`). + In the future this will be made configurable. +- Fixed regression in the :attr:`DatetimeIndex.date` and :attr:`DatetimeIndex.time` + attributes in case of timezone-aware data: :attr:`DatetimeIndex.time` returned + a tz-aware time instead of tz-naive (:issue:`21267`) and :attr:`DatetimeIndex.date` + returned incorrect date when the input date has a non-UTC timezone (:issue:`21230`). +- Fixed regression in :meth:`pandas.io.json.json_normalize` when called with ``None`` values + in nested levels in JSON, and to not drop keys with value as `None` (:issue:`21158`, :issue:`21356`). +- Bug in :meth:`~DataFrame.to_csv` causes encoding error when compression and encoding are specified (:issue:`21241`, :issue:`21118`) +- Bug preventing pandas from being importable with -OO optimization (:issue:`21071`) +- Bug in :meth:`Categorical.fillna` incorrectly raising a ``TypeError`` when `value` the individual categories are iterable and `value` is an iterable (:issue:`21097`, :issue:`19788`) +- Fixed regression in constructors coercing NA values like ``None`` to strings when passing ``dtype=str`` (:issue:`21083`) +- Regression in :func:`pivot_table` where an ordered ``Categorical`` with missing + values for the pivot's ``index`` would give a mis-aligned result (:issue:`21133`) +- Fixed regression in merging on boolean index/columns (:issue:`21119`). + +.. 
_whatsnew_0231.performance: + +Performance Improvements +~~~~~~~~~~~~~~~~~~~~~~~~ + +- Improved performance of :meth:`CategoricalIndex.is_monotonic_increasing`, :meth:`CategoricalIndex.is_monotonic_decreasing` and :meth:`CategoricalIndex.is_monotonic` (:issue:`21025`) +- Improved performance of :meth:`CategoricalIndex.is_unique` (:issue:`21107`) + + +.. _whatsnew_0231.bug_fixes: + +Bug Fixes +~~~~~~~~~ + +**Groupby/Resample/Rolling** + +- Bug in :func:`DataFrame.agg` where applying multiple aggregation functions to a :class:`DataFrame` with duplicated column names would cause a stack overflow (:issue:`21063`) +- Bug in :func:`pandas.core.groupby.GroupBy.ffill` and :func:`pandas.core.groupby.GroupBy.bfill` where the fill within a grouping would not always be applied as intended due to the implementations' use of a non-stable sort (:issue:`21207`) +- Bug in :func:`pandas.core.groupby.GroupBy.rank` where results did not scale to 100% when specifying ``method='dense'`` and ``pct=True`` +- Bug in :func:`pandas.DataFrame.rolling` and :func:`pandas.Series.rolling` which incorrectly accepted a 0 window size rather than raising (:issue:`21286`) + +**Data-type specific** + +- Bug in :meth:`Series.str.replace()` where the method throws `TypeError` on Python 3.5.2 (:issue:`21078`) +- Bug in :class:`Timedelta`: where passing a float with a unit would prematurely round the float precision (:issue:`14156`) +- Bug in :func:`pandas.testing.assert_index_equal` which raised ``AssertionError`` incorrectly, when comparing two :class:`CategoricalIndex` objects with param ``check_categorical=False`` (:issue:`19776`) + +**Sparse** + +- Bug in :attr:`SparseArray.shape` which previously only returned the shape :attr:`SparseArray.sp_values` (:issue:`21126`) + +**Indexing** + +- Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`) +- Bug in :func:`interval_range` when ``start``/``periods`` or ``end``/``periods`` are specified 
with float ``start`` or ``end`` (:issue:`21161`) +- Bug in :meth:`MultiIndex.set_names` where error raised for a ``MultiIndex`` with ``nlevels == 1`` (:issue:`21149`) +- Bug in :class:`IntervalIndex` constructors where creating an ``IntervalIndex`` from categorical data was not fully supported (:issue:`21243`, :issue:`21253`) +- Bug in :meth:`MultiIndex.sort_index` which was not guaranteed to sort correctly with ``level=1``; this was also causing data misalignment in particular :meth:`DataFrame.stack` operations (:issue:`20994`, :issue:`20945`, :issue:`21052`) + +**Plotting** + +- New keywords (sharex, sharey) to turn on/off sharing of x/y-axis by subplots generated with pandas.DataFrame().groupby().boxplot() (:issue:`20968`) + +**I/O** + +- Bug in IO methods specifying ``compression='zip'`` which produced uncompressed zip archives (:issue:`17778`, :issue:`21144`) +- Bug in :meth:`DataFrame.to_stata` which prevented exporting DataFrames to buffers and most file-like objects (:issue:`21041`) +- Bug in :meth:`read_stata` and :class:`StataReader` which did not correctly decode utf-8 strings on Python 3 from Stata 14 files (dta version 118) (:issue:`21244`) +- Bug in IO JSON :func:`read_json` reading empty JSON schema with ``orient='table'`` back to :class:`DataFrame` caused an error (:issue:`21287`) + +**Reshaping** + +- Bug in :func:`concat` where error was raised in concatenating :class:`Series` with numpy scalar and tuple names (:issue:`21015`) +- Bug in :func:`concat` warning message providing the wrong guidance for future behavior (:issue:`21101`) + +**Other** + +- Tab completion on :class:`Index` in IPython no longer outputs deprecation warnings (:issue:`21125`) +- Bug preventing pandas being used on Windows without C++ redistributable installed (:issue:`21106`) diff --git a/doc/source/whatsnew/v0.23.2.txt b/doc/source/whatsnew/v0.23.2.txt new file mode 100644 index 0000000000000..77ad860fc4e8e --- /dev/null +++ b/doc/source/whatsnew/v0.23.2.txt @@ -0,0 +1,108 
@@ +.. _whatsnew_0232: + +v0.23.2 +------- + +This is a minor bug-fix release in the 0.23.x series and includes some small regression fixes +and bug fixes. We recommend that all users upgrade to this version. + +.. note:: + + Pandas 0.23.2 is first pandas release that's compatible with + Python 3.7 (:issue:`20552`) + +.. warning:: + + Starting January 1, 2019, pandas feature releases will support Python 3 only. + See :ref:`install.dropping-27` for more. + +.. contents:: What's new in v0.23.2 + :local: + :backlinks: none + +.. _whatsnew_0232.enhancements: + +Logical Reductions over Entire DataFrame +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:meth:`DataFrame.all` and :meth:`DataFrame.any` now accept ``axis=None`` to reduce over all axes to a scalar (:issue:`19976`) + +.. ipython:: python + + df = pd.DataFrame({"A": [1, 2], "B": [True, False]}) + df.all(axis=None) + + +This also provides compatibility with NumPy 1.15, which now dispatches to ``DataFrame.all``. +With NumPy 1.15 and pandas 0.23.1 or earlier, :func:`numpy.all` will no longer reduce over every axis: + +.. code-block:: python + + >>> # NumPy 1.15, pandas 0.23.1 + >>> np.any(pd.DataFrame({"A": [False], "B": [False]})) + A False + B False + dtype: bool + +With pandas 0.23.2, that will correctly return False, as it did with NumPy < 1.15. + +.. ipython:: python + + np.any(pd.DataFrame({"A": [False], "B": [False]})) + + +.. _whatsnew_0232.fixed_regressions: + +Fixed Regressions +~~~~~~~~~~~~~~~~~ + +- Fixed regression in :meth:`to_csv` when handling file-like object incorrectly (:issue:`21471`) +- Re-allowed duplicate level names of a ``MultiIndex``. Accessing a level that has a duplicate name by name still raises an error (:issue:`19029`). 
+- Bug in both :meth:`DataFrame.first_valid_index` and :meth:`Series.first_valid_index` raised for a row index having duplicate values (:issue:`21441`) +- Fixed printing of DataFrames with hierarchical columns with long names (:issue:`21180`) +- Fixed regression in :meth:`~DataFrame.reindex` and :meth:`~DataFrame.groupby` + with a MultiIndex or multiple keys that contains categorical datetime-like values (:issue:`21390`). +- Fixed regression in unary negative operations with object dtype (:issue:`21380`) +- Bug in :meth:`Timestamp.ceil` and :meth:`Timestamp.floor` when timestamp is a multiple of the rounding frequency (:issue:`21262`) +- Fixed regression in :func:`to_clipboard` that defaulted to copying dataframes with space delimited instead of tab delimited (:issue:`21104`) + + +Build Changes +~~~~~~~~~~~~~ + +- The source and binary distributions no longer include test data files, resulting in smaller download sizes. Tests relying on these data files will be skipped when using ``pandas.test()``. (:issue:`19320`) + +.. _whatsnew_0232.bug_fixes: + +Bug Fixes +~~~~~~~~~ + +**Conversion** + +- Bug in constructing :class:`Index` with an iterator or generator (:issue:`21470`) +- Bug in :meth:`Series.nlargest` for signed and unsigned integer dtypes when the minimum value is present (:issue:`21426`) + +**Indexing** + +- Bug in :meth:`Index.get_indexer_non_unique` with categorical key (:issue:`21448`) +- Bug in comparison operations for :class:`MultiIndex` where error was raised on equality / inequality comparison involving a MultiIndex with ``nlevels == 1`` (:issue:`21149`) +- Bug in :meth:`DataFrame.drop` behaviour is not consistent for unique and non-unique indexes (:issue:`21494`) +- Bug in :func:`DataFrame.duplicated` with a large number of columns causing a 'maximum recursion depth exceeded' (:issue:`21524`). 
+ +**I/O** + +- Bug in :func:`read_csv` that caused it to incorrectly raise an error when ``nrows=0``, ``low_memory=True``, and ``index_col`` was not ``None`` (:issue:`21141`) +- Bug in :func:`json_normalize` when formatting the ``record_prefix`` with integer columns (:issue:`21536`) + +**Categorical** + +- Bug in rendering :class:`Series` with ``Categorical`` dtype in rare conditions under Python 2.7 (:issue:`21002`) + +**Timezones** + +- Bug in :class:`Timestamp` and :class:`DatetimeIndex` where passing a :class:`Timestamp` localized after a DST transition would return a datetime before the DST transition (:issue:`20854`) +- Bug in comparing :class:`DataFrame`s with tz-aware :class:`DatetimeIndex` columns with a DST transition that raised a ``KeyError`` (:issue:`19970`) + +**Timedelta** + +- Bug in :class:`Timedelta` where non-zero timedeltas shorter than 1 microsecond were considered False (:issue:`21484`) diff --git a/doc/source/whatsnew/v0.23.3.txt b/doc/source/whatsnew/v0.23.3.txt new file mode 100644 index 0000000000000..b8adce27d2523 --- /dev/null +++ b/doc/source/whatsnew/v0.23.3.txt @@ -0,0 +1,7 @@ +.. _whatsnew_0233: + +v0.23.3 (July 7, 2018) +---------------------- + +This release fixes a build issue with the sdist for Python 3.7 (:issue:`21785`) +There are no other changes. diff --git a/doc/source/whatsnew/v0.23.4.txt b/doc/source/whatsnew/v0.23.4.txt new file mode 100644 index 0000000000000..9a3ad3f61ee49 --- /dev/null +++ b/doc/source/whatsnew/v0.23.4.txt @@ -0,0 +1,37 @@ +.. _whatsnew_0234: + +v0.23.4 (August 3, 2018) +------------------------ + +This is a minor bug-fix release in the 0.23.x series and includes some small regression fixes +and bug fixes. We recommend that all users upgrade to this version. + +.. warning:: + + Starting January 1, 2019, pandas feature releases will support Python 3 only. + See :ref:`install.dropping-27` for more. + +.. contents:: What's new in v0.23.4 + :local: + :backlinks: none + +.. 
_whatsnew_0234.fixed_regressions: + +Fixed Regressions +~~~~~~~~~~~~~~~~~ + +- Python 3.7 with Windows gave all missing values for rolling variance calculations (:issue:`21813`) + +.. _whatsnew_0234.bug_fixes: + +Bug Fixes +~~~~~~~~~ + +**Groupby/Resample/Rolling** + +- Bug where calling :func:`DataFrameGroupBy.agg` with a list of functions including ``ohlc`` as the non-initial element would raise a ``ValueError`` (:issue:`21716`) +- Bug in ``roll_quantile`` caused a memory leak when calling ``.rolling(...).quantile(q)`` with ``q`` in (0,1) (:issue:`21965`) + +**Missing** + +- Bug in :func:`Series.clip` and :func:`DataFrame.clip` cannot accept list-like threshold containing ``NaN`` (:issue:`19992`) diff --git a/doc/source/whatsnew/v0.23.5.txt b/doc/source/whatsnew/v0.23.5.txt new file mode 100644 index 0000000000000..f69e38e7fdd50 --- /dev/null +++ b/doc/source/whatsnew/v0.23.5.txt @@ -0,0 +1,46 @@ +.. _whatsnew_0235: + +v0.23.5 (TBD 0, 2018) +--------------------- + +This is a minor bug-fix release in the 0.23.x series and includes some small regression fixes +and bug fixes. We recommend that all users upgrade to this version. + +.. warning:: + + Starting January 1, 2019, pandas feature releases will support Python 3 only. + See :ref:`install.dropping-27` for more. + +.. contents:: What's new in v0.23.5 + :local: + :backlinks: none + +.. _whatsnew_0235.fixed_regressions: + +Fixed Regressions +~~~~~~~~~~~~~~~~~ + +- Calling :meth:`DataFrameGroupBy.rank` and :meth:`SeriesGroupBy.rank` with empty groups + and ``pct=True`` was raising a ``ZeroDivisionError`` due to `c1068d9 + <https://github.com/pandas-dev/pandas/commit/c1068d9d242c22cb2199156f6fb82eb5759178ae>`_ (:issue:`22519`) +- +- + +.. _whatsnew_0235.bug_fixes: + +Bug Fixes +~~~~~~~~~ + +**Groupby/Resample/Rolling** + +- Bug in :meth:`DataFrame.resample` when resampling ``NaT`` in ``TimeDeltaIndex`` (:issue:`13223`). 
+- + +**Missing** + +- +- + +**I/O** + +- Bug in :func:`read_csv` that caused it to raise ``OverflowError`` when trying to use 'inf' as ``na_value`` with integer index column (:issue:`17128`) diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 43afd1e0f5969..a6dbaff17e543 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -297,7 +297,8 @@ def group_fillna_indexer(ndarray[int64_t] out, ndarray[int64_t] labels, # Make sure all arrays are the same size assert N == len(labels) == len(mask) - sorted_labels = np.argsort(labels).astype(np.int64, copy=False) + sorted_labels = np.argsort(labels, kind='mergesort').astype( + np.int64, copy=False) if direction == 'bfill': sorted_labels = sorted_labels[::-1] diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index 6a33e4a09476d..d7885e112a7e0 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -418,7 +418,7 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out, bint is_datetimelike, object ties_method, bint ascending, bint pct, object na_option): """ - Provides the rank of values within each group. + Provides the rank of values within each group. Parameters ---------- @@ -451,8 +451,8 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out, """ cdef: TiebreakEnumType tiebreak - Py_ssize_t i, j, N, K, val_start=0, grp_start=0, dups=0, sum_ranks=0 - Py_ssize_t grp_vals_seen=1, grp_na_count=0 + Py_ssize_t i, j, N, K, grp_start=0, dups=0, sum_ranks=0 + Py_ssize_t grp_vals_seen=1, grp_na_count=0, grp_tie_count=0 ndarray[int64_t] _as ndarray[float64_t, ndim=2] grp_sizes ndarray[{{c_type}}] masked_vals @@ -563,6 +563,7 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out, dups = sum_ranks = 0 val_start = i grp_vals_seen += 1 + grp_tie_count +=1 # Similar to the previous conditional, check now if we are moving # to a new group. 
If so, keep track of the index where the new @@ -571,17 +572,27 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out, # (used by pct calculations later). also be sure to reset any of # the items helping to calculate dups if i == N - 1 or labels[_as[i]] != labels[_as[i+1]]: - for j in range(grp_start, i + 1): - grp_sizes[_as[j], 0] = i - grp_start + 1 - grp_na_count + if tiebreak != TIEBREAK_DENSE: + for j in range(grp_start, i + 1): + grp_sizes[_as[j], 0] = i - grp_start + 1 - grp_na_count + else: + for j in range(grp_start, i + 1): + grp_sizes[_as[j], 0] = (grp_tie_count - + (grp_na_count > 0)) dups = sum_ranks = 0 grp_na_count = 0 - val_start = i + 1 + grp_tie_count = 0 grp_start = i + 1 grp_vals_seen = 1 if pct: for i in range(N): - out[i, 0] = out[i, 0] / grp_sizes[i, 0] + # We don't include NaN values in percentage + # rankings, so we assign them percentages of NaN. + if out[i, 0] != out[i, 0] or out[i, 0] == NAN: + out[i, 0] = NAN + else: + out[i, 0] = out[i, 0] / grp_sizes[i, 0] {{endif}} {{endfor}} diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx index c6f182ac5003f..4489847518a1d 100644 --- a/pandas/_libs/hashing.pyx +++ b/pandas/_libs/hashing.pyx @@ -8,8 +8,7 @@ import numpy as np from numpy cimport ndarray, uint8_t, uint32_t, uint64_t from util cimport _checknull -from cpython cimport (PyString_Check, - PyBytes_Check, +from cpython cimport (PyBytes_Check, PyUnicode_Check) from libc.stdlib cimport malloc, free @@ -62,9 +61,7 @@ def hash_object_array(ndarray[object] arr, object key, object encoding='utf8'): cdef list datas = [] for i in range(n): val = arr[i] - if PyString_Check(val): - data = <bytes>val.encode(encoding) - elif PyBytes_Check(val): + if PyBytes_Check(val): data = <bytes>val elif PyUnicode_Check(val): data = <bytes>val.encode(encoding) diff --git a/pandas/_libs/src/headers/cmath b/pandas/_libs/src/headers/cmath index d8e2239406cae..2bccf9bb13d77 100644 --- a/pandas/_libs/src/headers/cmath +++ 
b/pandas/_libs/src/headers/cmath @@ -6,6 +6,7 @@ #if defined(_MSC_VER) && (_MSC_VER < 1800) #include <cmath> namespace std { + __inline int isnan(double x) { return _isnan(x); } __inline int signbit(double num) { return _copysign(1.0, num) < 0; } } #else diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 17453d8af1297..0f58cfa761f21 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -77,7 +77,7 @@ cdef inline object create_time_from_ts( int64_t value, pandas_datetimestruct dts, object tz, object freq): """ convenience routine to construct a datetime.time from its parts """ - return time(dts.hour, dts.min, dts.sec, dts.us, tz) + return time(dts.hour, dts.min, dts.sec, dts.us) def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index f4841e6abb7e8..3cbef82437544 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -347,25 +347,11 @@ cdef _TSObject convert_datetime_to_tsobject(datetime ts, object tz, if tz is not None: tz = maybe_get_tz(tz) - # sort of a temporary hack if ts.tzinfo is not None: - if hasattr(tz, 'normalize') and hasattr(ts.tzinfo, '_utcoffset'): - ts = tz.normalize(ts) - obj.value = pydatetime_to_dt64(ts, &obj.dts) - obj.tzinfo = ts.tzinfo - else: - # tzoffset - try: - tz = ts.astimezone(tz).tzinfo - except: - pass - obj.value = pydatetime_to_dt64(ts, &obj.dts) - ts_offset = get_utcoffset(ts.tzinfo, ts) - obj.value -= int(ts_offset.total_seconds() * 1e9) - tz_offset = get_utcoffset(tz, ts) - obj.value += int(tz_offset.total_seconds() * 1e9) - dt64_to_dtstruct(obj.value, &obj.dts) - obj.tzinfo = tz + # Convert the current timezone to the passed timezone + ts = ts.astimezone(tz) + obj.value = pydatetime_to_dt64(ts, &obj.dts) + obj.tzinfo = ts.tzinfo elif not is_utc(tz): ts = _localize_pydatetime(ts, tz) obj.value = pydatetime_to_dt64(ts, &obj.dts) diff --git 
a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index d17d4e7139d72..769f3ca5fa8bf 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -202,22 +202,22 @@ cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1: if unit == 'D' or unit == 'd': m = 1000000000L * 86400 - p = 6 + p = 9 elif unit == 'h': m = 1000000000L * 3600 - p = 6 + p = 9 elif unit == 'm': m = 1000000000L * 60 - p = 6 + p = 9 elif unit == 's': m = 1000000000L - p = 6 + p = 9 elif unit == 'ms': m = 1000000L - p = 3 + p = 6 elif unit == 'us': m = 1000L - p = 0 + p = 3 elif unit == 'ns' or unit is None: m = 1L p = 0 @@ -231,10 +231,10 @@ cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1: # cast the unit, multiply base/frace separately # to avoid precision issues from float -> int base = <int64_t> ts - frac = ts -base + frac = ts - base if p: frac = round(frac, p) - return <int64_t> (base *m) + <int64_t> (frac *m) + return <int64_t> (base * m) + <int64_t> (frac * m) cdef inline _decode_if_necessary(object ts): @@ -760,7 +760,32 @@ cdef class _Timedelta(timedelta): @property def delta(self): - """ return out delta in ns (for internal compat) """ + """ + Return the timedelta in nanoseconds (ns), for internal compatibility. + + Returns + ------- + int + Timedelta in nanoseconds. + + Examples + -------- + >>> td = pd.Timedelta('1 days 42 ns') + >>> td.delta + 86400000000042 + + >>> td = pd.Timedelta('3 s') + >>> td.delta + 3000000000 + + >>> td = pd.Timedelta('3 ms 5 us') + >>> td.delta + 3005000 + + >>> td = pd.Timedelta(42, unit='ns') + >>> td.delta + 42 + """ return self.value @property @@ -791,9 +816,32 @@ cdef class _Timedelta(timedelta): @property def nanoseconds(self): """ - Number of nanoseconds (>= 0 and less than 1 microsecond). + Return the number of nanoseconds (n), where 0 <= n < 1 microsecond. + + Returns + ------- + int + Number of nanoseconds. 
- .components will return the shown components + See Also + -------- + Timedelta.components : Return all attributes with assigned values + (i.e. days, hours, minutes, seconds, milliseconds, microseconds, + nanoseconds). + + Examples + -------- + **Using string input** + + >>> td = pd.Timedelta('1 days 2 min 3 us 42 ns') + >>> td.nanoseconds + 42 + + **Using integer input** + + >>> td = pd.Timedelta(42, unit='ns') + >>> td.nanoseconds + 42 """ self._ensure_components() return self._ns @@ -851,6 +899,9 @@ cdef class _Timedelta(timedelta): def __str__(self): return self._repr_base(format='long') + def __bool__(self): + return self.value != 0 + def isoformat(self): """ Format Timedelta as ISO 8601 Duration like @@ -1198,7 +1249,7 @@ class Timedelta(_Timedelta): deprecated. Use 'array // timedelta.value' instead. If you want to obtain epochs from an array of timestamps, you can rather use - 'array - pd.Timestamp("1970-01-01")) // pd.Timedelta("1s")'. + '(array - pd.Timestamp("1970-01-01")) // pd.Timedelta("1s")'. """) warnings.warn(msg, FutureWarning) return other // self.value diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index ba5ebdab82ddc..123ccebf83a56 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -59,42 +59,51 @@ cdef inline object create_timestamp_from_ts(int64_t value, def round_ns(values, rounder, freq): + """ Applies rounding function at given frequency Parameters ---------- - values : int, :obj:`ndarray` - rounder : function + values : :obj:`ndarray` + rounder : function, eg. 
'ceil', 'floor', 'round' freq : str, obj Returns ------- - int or :obj:`ndarray` + :obj:`ndarray` """ + from pandas.tseries.frequencies import to_offset unit = to_offset(freq).nanos + + # GH21262 If the Timestamp is multiple of the freq str + # don't apply any rounding + mask = values % unit == 0 + if mask.all(): + return values + r = values.copy() + if unit < 1000: # for nano rounding, work with the last 6 digits separately # due to float precision buff = 1000000 - r = (buff * (values // buff) + unit * - (rounder((values % buff) * (1 / float(unit)))).astype('i8')) + r[~mask] = (buff * (values[~mask] // buff) + + unit * (rounder((values[~mask] % buff) * + (1 / float(unit)))).astype('i8')) else: if unit % 1000 != 0: msg = 'Precision will be lost using frequency: {}' warnings.warn(msg.format(freq)) - # GH19206 # to deal with round-off when unit is large if unit >= 1e9: divisor = 10 ** int(np.log10(unit / 1e7)) else: divisor = 10 - - r = (unit * rounder((values * (divisor / float(unit))) / divisor) - .astype('i8')) - + r[~mask] = (unit * rounder((values[~mask] * + (divisor / float(unit))) / divisor) + .astype('i8')) return r @@ -649,7 +658,10 @@ class Timestamp(_Timestamp): else: value = self.value - r = round_ns(value, rounder, freq) + value = np.array([value], dtype=np.int64) + + # Will only ever contain 1 element for timestamp + r = round_ns(value, rounder, freq)[0] result = Timestamp(r, unit='ns') if self.tz is not None: result = result.tz_localize(self.tz) diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index 5121d293efcb6..6954094b46e69 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -14,6 +14,7 @@ cnp.import_array() cdef extern from "../src/headers/cmath" namespace "std": + bint isnan(double) nogil int signbit(double) nogil double sqrt(double x) nogil @@ -654,16 +655,16 @@ cdef inline void add_var(double val, double *nobs, double *mean_x, double *ssqdm_x) nogil: """ add a value from the var calc """ cdef double delta + # 
`isnan` instead of equality as fix for GH-21813, msvc 2017 bug + if isnan(val): + return - # Not NaN - if val == val: - nobs[0] = nobs[0] + 1 - - # a part of Welford's method for the online variance-calculation - # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance - delta = val - mean_x[0] - mean_x[0] = mean_x[0] + delta / nobs[0] - ssqdm_x[0] = ssqdm_x[0] + ((nobs[0] - 1) * delta ** 2) / nobs[0] + nobs[0] = nobs[0] + 1 + # a part of Welford's method for the online variance-calculation + # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance + delta = val - mean_x[0] + mean_x[0] = mean_x[0] + delta / nobs[0] + ssqdm_x[0] = ssqdm_x[0] + ((nobs[0] - 1) * delta ** 2) / nobs[0] cdef inline void remove_var(double val, double *nobs, double *mean_x, @@ -1482,6 +1483,8 @@ def roll_quantile(ndarray[float64_t, cast=True] input, int64_t win, else: output[i] = NaN + skiplist_destroy(skiplist) + return output diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 12517372fedd1..28a55133e68aa 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -40,10 +40,11 @@ from collections import namedtuple PY2 = sys.version_info[0] == 2 -PY3 = (sys.version_info[0] >= 3) -PY35 = (sys.version_info >= (3, 5)) -PY36 = (sys.version_info >= (3, 6)) -PYPY = (platform.python_implementation() == 'PyPy') +PY3 = sys.version_info[0] >= 3 +PY35 = sys.version_info >= (3, 5) +PY36 = sys.version_info >= (3, 6) +PY37 = sys.version_info >= (3, 7) +PYPY = platform.python_implementation() == 'PyPy' try: import __builtin__ as builtins @@ -425,7 +426,7 @@ def raise_with_traceback(exc, traceback=Ellipsis): # In Python 3.7, the private re._pattern_type is removed. 
# Python 3.5+ have typing.re.Pattern -if PY35: +if PY36: import typing re_type = typing.re.Pattern else: diff --git a/pandas/conftest.py b/pandas/conftest.py index b09cb872a12fb..ead357747666d 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -1,5 +1,8 @@ +import os + import pytest +import pandas import numpy as np import pandas as pd from pandas.compat import PY3 @@ -15,6 +18,8 @@ def pytest_addoption(parser): help="run high memory tests") parser.addoption("--only-slow", action="store_true", help="run only slow tests") + parser.addoption("--strict-data-files", action="store_true", + help="Fail if a test is skipped for missing data file.") def pytest_runtest_setup(item): @@ -105,6 +110,16 @@ def compression(request): return request.param +@pytest.fixture(params=['gzip', 'bz2', 'zip', + pytest.param('xz', marks=td.skip_if_no_lzma)]) +def compression_only(request): + """ + Fixture for trying common compression types in compression tests excluding + uncompressed case + """ + return request.param + + @pytest.fixture(scope='module') def datetime_tz_utc(): from datetime import timezone @@ -119,6 +134,51 @@ def join_type(request): return request.param +@pytest.fixture +def datapath(request): + """Get the path to a data file. + + Parameters + ---------- + path : str + Path to the file, relative to ``pandas/tests/`` + + Returns + ------- + path : path including ``pandas/tests``. + + Raises + ------ + ValueError + If the path doesn't exist and the --strict-data-files option is set. + """ + def deco(*args): + path = os.path.join('pandas', 'tests', *args) + if not os.path.exists(path): + if request.config.getoption("--strict-data-files"): + msg = "Could not find file {} and --strict-data-files is set." + raise ValueError(msg.format(path)) + else: + msg = "Could not find {}." 
+ pytest.skip(msg.format(path)) + return path + return deco + + +@pytest.fixture +def iris(datapath): + """The iris dataset as a DataFrame.""" + return pandas.read_csv(datapath('data', 'iris.csv')) + + +@pytest.fixture(params=['nlargest', 'nsmallest']) +def nselect_method(request): + """ + Fixture for trying all nselect methods + """ + return request.param + + @pytest.fixture(params=[None, np.nan, pd.NaT, float('nan'), np.float('NaN')]) def nulls_fixture(request): """ @@ -149,3 +209,77 @@ def tz_aware_fixture(request): Fixture for trying explicit timezones: {0} """ return request.param + + +@pytest.fixture(params=[str, 'str', 'U']) +def string_dtype(request): + """Parametrized fixture for string dtypes. + + * str + * 'str' + * 'U' + """ + return request.param + + +@pytest.fixture(params=["float32", "float64"]) +def float_dtype(request): + """ + Parameterized fixture for float dtypes. + + * float32 + * float64 + """ + + return request.param + + +UNSIGNED_INT_DTYPES = ["uint8", "uint16", "uint32", "uint64"] +SIGNED_INT_DTYPES = ["int8", "int16", "int32", "int64"] +ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES + + +@pytest.fixture(params=SIGNED_INT_DTYPES) +def sint_dtype(request): + """ + Parameterized fixture for signed integer dtypes. + + * int8 + * int16 + * int32 + * int64 + """ + + return request.param + + +@pytest.fixture(params=UNSIGNED_INT_DTYPES) +def uint_dtype(request): + """ + Parameterized fixture for unsigned integer dtypes. + + * uint8 + * uint16 + * uint32 + * uint64 + """ + + return request.param + + +@pytest.fixture(params=ALL_INT_DTYPES) +def any_int_dtype(request): + """ + Parameterized fixture for any integer dtypes. 
+ + * int8 + * uint8 + * int16 + * uint16 + * int32 + * uint32 + * int64 + * uint64 + """ + + return request.param diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index c638b9e4ea117..7a853d575aa69 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -12,7 +12,8 @@ class DirNamesMixin(object): _accessors = frozenset([]) - _deprecations = frozenset(['asobject']) + _deprecations = frozenset( + ['asobject', 'base', 'data', 'flags', 'itemsize', 'strides']) def _dir_deletions(self): """ delete unwanted __dir__ for this object """ diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 88bc497f9f22d..9d8d208d2d5c1 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -95,7 +95,7 @@ def _ensure_data(values, dtype=None): values = _ensure_float64(values) return values, 'float64', 'float64' - except (TypeError, ValueError): + except (TypeError, ValueError, OverflowError): # if we are trying to coerce to a dtype # and it is incompat this will fall thru to here return _ensure_object(values), 'object', 'object' @@ -429,7 +429,7 @@ def isin(comps, values): values = values.astype('int64', copy=False) comps = comps.astype('int64', copy=False) f = lambda x, y: htable.ismember_int64(x, y) - except (TypeError, ValueError): + except (TypeError, ValueError, OverflowError): values = values.astype(object) comps = comps.astype(object) @@ -1131,9 +1131,12 @@ def compute(self, method): return dropped[slc].sort_values(ascending=ascending).head(n) # fast method - arr, _, _ = _ensure_data(dropped.values) + arr, pandas_dtype, _ = _ensure_data(dropped.values) if method == 'nlargest': arr = -arr + if is_integer_dtype(pandas_dtype): + # GH 21426: ensure reverse ordering at boundaries + arr -= 1 if self.keep == 'last': arr = arr[::-1] diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index abcb9ae3494b5..b587a4c0bc722 100644 --- a/pandas/core/arrays/categorical.py +++ 
b/pandas/core/arrays/categorical.py @@ -3,7 +3,6 @@ import numpy as np from warnings import warn import textwrap -import types from pandas import compat from pandas.compat import u, lzip @@ -12,6 +11,7 @@ from pandas.core.dtypes.generic import ( ABCSeries, ABCIndexClass, ABCCategoricalIndex) from pandas.core.dtypes.missing import isna, notna +from pandas.core.dtypes.inference import is_hashable from pandas.core.dtypes.cast import ( maybe_infer_to_datetimelike, coerce_indexer_dtype) @@ -27,7 +27,7 @@ is_categorical, is_categorical_dtype, is_list_like, is_sequence, - is_scalar, + is_scalar, is_iterator, is_dict_like) from pandas.core.algorithms import factorize, take_1d, unique1d, take @@ -1751,7 +1751,7 @@ def fillna(self, value=None, method=None, limit=None): values[indexer] = values_codes[values_codes != -1] # If value is not a dict or Series it should be a scalar - elif is_scalar(value): + elif is_hashable(value): if not isna(value) and value not in self.categories: raise ValueError("fill value must be in categories") @@ -2472,7 +2472,7 @@ def _convert_to_list_like(list_like): if isinstance(list_like, list): return list_like if (is_sequence(list_like) or isinstance(list_like, tuple) or - isinstance(list_like, types.GeneratorType)): + is_iterator(list_like)): return list(list_like) elif is_scalar(list_like): return [list_like] diff --git a/pandas/core/base.py b/pandas/core/base.py index fa78c89ed4ee7..c331ead8d2fef 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -114,7 +114,7 @@ def _reset_cache(self, key=None): def __sizeof__(self): """ - Generates the total memory usage for a object that returns + Generates the total memory usage for an object that returns either a value or Series of values """ if hasattr(self, 'memory_usage'): @@ -590,9 +590,10 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis): # multiples else: - for col in obj: + for index, col in enumerate(obj): try: - colg = self._gotitem(col, ndim=1, subset=obj[col]) + colg = 
self._gotitem(col, ndim=1, + subset=obj.iloc[:, index]) results.append(colg.aggregate(arg)) keys.append(col) except (TypeError, DataError): @@ -675,7 +676,6 @@ def _gotitem(self, key, ndim, subset=None): subset : object, default None subset to act on """ - # create a new object to prevent aliasing if subset is None: subset = self.obj diff --git a/pandas/core/common.py b/pandas/core/common.py index b9182bfd2cbe2..1de8269c9a0c6 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -55,8 +55,11 @@ def flatten(l): def _consensus_name_attr(objs): name = objs[0].name for obj in objs[1:]: - if obj.name != name: - return None + try: + if obj.name != name: + name = None + except ValueError: + name = None return name diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index e4ed6d544d42e..ebc7a13234a98 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1227,3 +1227,45 @@ def construct_1d_object_array_from_listlike(values): result = np.empty(len(values), dtype='object') result[:] = values return result + + +def construct_1d_ndarray_preserving_na(values, dtype=None, copy=False): + """ + Construct a new ndarray, coercing `values` to `dtype`, preserving NA. + + Parameters + ---------- + values : Sequence + dtype : numpy.dtype, optional + copy : bool, default False + Note that copies may still be made with ``copy=False`` if casting + is required. + + Returns + ------- + arr : ndarray[dtype] + + Examples + -------- + >>> np.array([1.0, 2.0, None], dtype='str') + array(['1.0', '2.0', 'None'], dtype='<U4') + + >>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype='str') + + + """ + subarr = np.array(values, dtype=dtype, copy=copy) + + if dtype is not None and dtype.kind in ("U", "S"): + # GH-21083 + # We can't just return np.array(subarr, dtype='str') since + # NumPy will convert the non-string objects into strings + # Including NA values. 
Se we have to go + # string -> object -> update NA, which requires an + # additional pass over the data. + na_values = isna(values) + subarr2 = subarr.astype(object) + subarr2[na_values] = np.asarray(values, dtype=object)[na_values] + subarr = subarr2 + + return subarr diff --git a/pandas/core/frame.py b/pandas/core/frame.py index dccc840f5affd..2a40dd28a6fd7 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1774,8 +1774,11 @@ def to_stata(self, fname, convert_dates=None, write_index=True, Parameters ---------- - fname : str or buffer - String path of file-like object. + fname : path (string), buffer or path object + string, path object (pathlib.Path or py._path.local.LocalPath) or + object implementing a binary write() functions. If using a buffer + then the buffer will not be automatically closed after the file + data has been written. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. Options are 'tc', @@ -3718,7 +3721,7 @@ def rename(self, *args, **kwargs): copy : boolean, default True Also copy underlying data inplace : boolean, default False - Whether to return a new %(klass)s. If True then value of copy is + Whether to return a new DataFrame. If True then value of copy is ignored. 
level : int or level name, default None In case of a MultiIndex, only rename labels in the specified @@ -4454,7 +4457,10 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, axis = self._get_axis_number(axis) labels = self._get_axis(axis) - if level: + # make sure that the axis is lexsorted to start + # if not we need to reconstruct to get the correct indexer + labels = labels._sort_levels_monotonic() + if level is not None: new_axis, indexer = labels.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining) @@ -4462,9 +4468,6 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, elif isinstance(labels, MultiIndex): from pandas.core.sorting import lexsort_indexer - # make sure that the axis is lexsorted to start - # if not we need to reconstruct to get the correct indexer - labels = labels._sort_levels_monotonic() indexer = lexsort_indexer(labels._get_labels_for_sorting(), orders=ascending, na_position=na_position) @@ -5731,7 +5734,12 @@ def diff(self, periods=1, axis=0): # ---------------------------------------------------------------------- # Function application - def _gotitem(self, key, ndim, subset=None): + def _gotitem(self, + key, # type: Union[str, List[str]] + ndim, # type: int + subset=None # type: Union[Series, DataFrame, None] + ): + # type: (...) -> Union[Series, DataFrame] """ sub-classes to define return a sliced object @@ -5746,9 +5754,11 @@ def _gotitem(self, key, ndim, subset=None): """ if subset is None: subset = self + elif subset.ndim == 1: # is Series + return subset # TODO: _shallow_copy(subset)? 
- return self[key] + return subset[key] _agg_doc = dedent(""" The aggregation operations are always performed over an axis, either the @@ -6834,13 +6844,18 @@ def _count_level(self, level, axis=0, numeric_only=False): def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds): - axis = self._get_axis_number(axis) + if axis is None and filter_type == 'bool': + labels = None + constructor = None + else: + # TODO: Make other agg func handle axis=None properly + axis = self._get_axis_number(axis) + labels = self._get_agg_axis(axis) + constructor = self._constructor def f(x): return op(x, axis=axis, skipna=skipna, **kwds) - labels = self._get_agg_axis(axis) - # exclude timedelta/datetime unless we are uniform types if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type: numeric_only = True @@ -6849,6 +6864,13 @@ def f(x): try: values = self.values result = f(values) + + if (filter_type == 'bool' and is_object_dtype(values) and + axis is None): + # work around https://github.com/numpy/numpy/issues/10489 + # TODO: combine with hasattr(result, 'dtype') further down + # hard since we don't have `values` down there. + result = np.bool_(result) except Exception as e: # try by-column first @@ -6915,7 +6937,9 @@ def f(x): if axis == 0: result = coerce_to_dtypes(result, self.dtypes) - return Series(result, index=labels) + if constructor is not None: + result = Series(result, index=labels) + return result def nunique(self, axis=0, dropna=True): """ @@ -7079,6 +7103,9 @@ def quantile(self, q=0.5, axis=0, numeric_only=True, 0 <= q <= 1, the quantile(s) to compute axis : {0, 1, 'index', 'columns'} (default 0) 0 or 'index' for row-wise, 1 or 'columns' for column-wise + numeric_only : boolean, default True + If False, the quantile of datetime and timedelta data will be + computed as well interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} .. 
versionadded:: 0.18.0 @@ -7106,7 +7133,7 @@ def quantile(self, q=0.5, axis=0, numeric_only=True, -------- >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]), - columns=['a', 'b']) + columns=['a', 'b']) >>> df.quantile(.1) a 1.3 b 3.7 @@ -7116,6 +7143,20 @@ def quantile(self, q=0.5, axis=0, numeric_only=True, 0.1 1.3 3.7 0.5 2.5 55.0 + Specifying `numeric_only=False` will also compute the quantile of + datetime and timedelta data. + + >>> df = pd.DataFrame({'A': [1, 2], + 'B': [pd.Timestamp('2010'), + pd.Timestamp('2011')], + 'C': [pd.Timedelta('1 days'), + pd.Timedelta('2 days')]}) + >>> df.quantile(0.5, numeric_only=False) + A 1.5 + B 2010-07-02 12:00:00 + C 1 days 12:00:00 + Name: 0.5, dtype: object + See Also -------- pandas.core.window.Rolling.quantile diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9e4eda1bc4dc7..facc709877285 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -27,6 +27,7 @@ is_dict_like, is_re_compilable, is_period_arraylike, + is_object_dtype, pandas_dtype) from pandas.core.dtypes.cast import maybe_promote, maybe_upcast_putmask from pandas.core.dtypes.inference import is_hashable @@ -1117,7 +1118,8 @@ def __neg__(self): values = com._values_from_object(self) if is_bool_dtype(values): arr = operator.inv(values) - elif (is_numeric_dtype(values) or is_timedelta64_dtype(values)): + elif (is_numeric_dtype(values) or is_timedelta64_dtype(values) + or is_object_dtype(values)): arr = operator.neg(values) else: raise TypeError("Unary negative expects numeric dtype, not {}" @@ -1128,7 +1130,8 @@ def __pos__(self): values = com._values_from_object(self) if (is_bool_dtype(values) or is_period_arraylike(values)): arr = values - elif (is_numeric_dtype(values) or is_timedelta64_dtype(values)): + elif (is_numeric_dtype(values) or is_timedelta64_dtype(values) + or is_object_dtype(values)): arr = operator.pos(values) else: raise TypeError("Unary plus expects numeric dtype, not {}" @@ -3129,7 +3132,7 @@ 
def _drop_axis(self, labels, axis, level=None, errors='raise'): """ axis = self._get_axis_number(axis) axis_name = self._get_axis_name(axis) - axis, axis_ = self._get_axis(axis), axis + axis = self._get_axis(axis) if axis.is_unique: if level is not None: @@ -3138,24 +3141,25 @@ def _drop_axis(self, labels, axis, level=None, errors='raise'): new_axis = axis.drop(labels, level=level, errors=errors) else: new_axis = axis.drop(labels, errors=errors) - dropped = self.reindex(**{axis_name: new_axis}) - try: - dropped.axes[axis_].set_names(axis.names, inplace=True) - except AttributeError: - pass - result = dropped + result = self.reindex(**{axis_name: new_axis}) + # Case for non-unique axis else: labels = _ensure_object(com._index_labels_to_array(labels)) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError('axis must be a MultiIndex') indexer = ~axis.get_level_values(level).isin(labels) + + # GH 18561 MultiIndex.drop should raise if label is absent + if errors == 'raise' and indexer.all(): + raise KeyError('{} not found in axis'.format(labels)) else: indexer = ~axis.isin(labels) - - if errors == 'raise' and indexer.all(): - raise KeyError('{} not found in axis'.format(labels)) + # Check if label doesn't exist along axis + labels_missing = (axis.get_indexer_for(labels) == -1).any() + if errors == 'raise' and labels_missing: + raise KeyError('{} not found in axis'.format(labels)) slicer = [slice(None)] * self.ndim slicer[self._get_axis_number(axis_name)] = indexer @@ -6429,9 +6433,11 @@ def clip(self, lower=None, upper=None, axis=None, inplace=False, # GH 17276 # numpy doesn't like NaN as a clip value # so ignore - if np.any(pd.isnull(lower)): + # GH 19992 + # numpy doesn't drop a list-like bound containing NaN + if not is_list_like(lower) and np.any(pd.isnull(lower)): lower = None - if np.any(pd.isnull(upper)): + if not is_list_like(upper) and np.any(pd.isnull(upper)): upper = None # GH 2747 (arguments were reversed) @@ -8728,6 +8734,8 @@ def 
pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, return rs def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs): + if axis is None: + raise ValueError("Must specify 'axis' when aggregating by level.") grouped = self.groupby(level=level, axis=axis, sort=False) if hasattr(grouped, name) and skipna: return getattr(grouped, name)(**kwargs) @@ -8969,18 +8977,17 @@ def _find_valid_index(self, how): is_valid = is_valid.any(1) # reduce axis 1 if how == 'first': - # First valid value case - i = is_valid.idxmax() - if not is_valid[i]: - return None - return i - - elif how == 'last': - # Last valid value case - i = is_valid.values[::-1].argmax() - if not is_valid.iat[len(self) - i - 1]: - return None - return self.index[len(self) - i - 1] + idxpos = is_valid.values[::].argmax() + + if how == 'last': + idxpos = len(self) - 1 - is_valid.values[::-1].argmax() + + chk_notna = is_valid.iat[idxpos] + idx = self.index[idxpos] + + if not chk_notna: + return None + return idx @Appender(_shared_docs['valid_index'] % {'position': 'first', 'klass': 'NDFrame'}) @@ -9055,8 +9062,15 @@ def _doc_parms(cls): Parameters ---------- -axis : int, default 0 - Select the axis which can be 0 for indices and 1 for columns. +axis : {0 or 'index', 1 or 'columns', None}, default 0 + Indicate which axis or axes should be reduced. + + * 0 / 'index' : reduce the index, return a Series whose index is the + original column labels. + * 1 / 'columns' : reduce the columns, return a Series whose index is the + original index. + * None : reduce all axes, return a scalar. + skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. @@ -9078,9 +9092,9 @@ def _doc_parms(cls): %(examples)s""" _all_doc = """\ -Return whether all elements are True over series or dataframe axis. +Return whether all elements are True, potentially over an axis. 
-Returns True if all elements within a series or along a dataframe +Returns True if all elements within a series or along a Dataframe axis are non-zero, not-empty or not-False.""" _all_examples = """\ @@ -9093,7 +9107,7 @@ def _doc_parms(cls): >>> pd.Series([True, False]).all() False -Dataframes +DataFrames Create a dataframe from a dictionary. @@ -9110,12 +9124,17 @@ def _doc_parms(cls): col2 False dtype: bool -Adding axis=1 argument will check if row-wise values all return True. +Specify ``axis='columns'`` to check if row-wise values all return True. ->>> df.all(axis=1) +>>> df.all(axis='columns') 0 True 1 False dtype: bool + +Or ``axis=None`` for whether every value is True. + +>>> df.all(axis=None) +False """ _all_see_also = """\ @@ -9481,6 +9500,11 @@ def _doc_parms(cls): 1 False dtype: bool +Aggregating over the entire DataFrame with ``axis=None``. + +>>> df.any(axis=None) +True + `any` for an empty DataFrame is an empty Series. >>> pd.DataFrame([]).any() @@ -9651,22 +9675,17 @@ def _make_logical_function(cls, name, name1, name2, axis_descr, desc, f, @Substitution(outname=name, desc=desc, name1=name1, name2=name2, axis_descr=axis_descr, examples=examples, see_also=see_also) @Appender(_bool_doc) - def logical_func(self, axis=None, bool_only=None, skipna=None, level=None, + def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs): nv.validate_logical_func(tuple(), kwargs, fname=name) - if skipna is None: - skipna = True - if axis is None: - axis = self._stat_axis_number if level is not None: if bool_only is not None: raise NotImplementedError("Option bool_only is not " "implemented with option level.") return self._agg_by_level(name, axis=axis, level=level, skipna=skipna) - return self._reduce(f, axis=axis, skipna=skipna, - numeric_only=bool_only, filter_type='bool', - name=name) + return self._reduce(f, name, axis=axis, skipna=skipna, + numeric_only=bool_only, filter_type='bool') return set_function_name(logical_func, name, cls) diff 
--git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index df7a5dc9dc173..9d227ef37595f 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -3557,13 +3557,11 @@ def _aggregate_multiple_funcs(self, arg, _level): obj._selection = name results[name] = obj.aggregate(func) - if isinstance(list(compat.itervalues(results))[0], - DataFrame): - + if any(isinstance(x, DataFrame) for x in compat.itervalues(results)): # let higher level handle if _level: return results - return list(compat.itervalues(results))[0] + return DataFrame(results, columns=columns) def _wrap_output(self, output, index, names=None): diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index f9501cd2f9ddf..6f4fdfe5bf5cd 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -24,9 +24,9 @@ Sorting because non-concatenation axis is not aligned. A future version of pandas will change to not sort by default. -To accept the future behavior, pass 'sort=True'. +To accept the future behavior, pass 'sort=False'. -To retain the current behavior and silence the warning, pass sort=False +To retain the current behavior and silence the warning, pass 'sort=True'. 
""") diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index df39eb5fd8312..59527afe6c1f7 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -31,6 +31,7 @@ is_dtype_equal, is_dtype_union_equal, is_object_dtype, + is_categorical, is_categorical_dtype, is_interval_dtype, is_period_dtype, @@ -96,7 +97,8 @@ def cmp_method(self, other): if needs_i8_conversion(self) and needs_i8_conversion(other): return self._evaluate_compare(other, op) - if is_object_dtype(self) and self.nlevels == 1: + from .multi import MultiIndex + if is_object_dtype(self) and not isinstance(self, MultiIndex): # don't pass MultiIndex with np.errstate(all='ignore'): result = ops._comp_method_OBJECT_ARRAY(op, self.values, other) @@ -187,6 +189,9 @@ class Index(IndexOpsMixin, PandasObject): ---------- data : array-like (1-dimensional) dtype : NumPy dtype (default: object) + If dtype is None, we find the dtype that best fits the data. + If an actual dtype is provided, we coerce to that dtype if it's safe. + Otherwise, an error will be raised. 
copy : bool Make a copy of input ndarray name : object @@ -312,7 +317,14 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, if is_integer_dtype(dtype): inferred = lib.infer_dtype(data) if inferred == 'integer': - data = np.array(data, copy=copy, dtype=dtype) + try: + data = np.array(data, copy=copy, dtype=dtype) + except OverflowError: + # gh-15823: a more user-friendly error message + raise OverflowError( + "the elements provided in the data cannot " + "all be casted to the dtype {dtype}" + .format(dtype=dtype)) elif inferred in ['floating', 'mixed-integer-float']: if isna(data).any(): raise ValueError('cannot convert float ' @@ -424,12 +436,14 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, elif data is None or is_scalar(data): cls._scalar_data_error(data) else: - if tupleize_cols and is_list_like(data) and data: + if tupleize_cols and is_list_like(data): + # GH21470: convert iterable to list before determining if empty if is_iterator(data): data = list(data) - # we must be all tuples, otherwise don't construct - # 10697 - if all(isinstance(e, tuple) for e in data): + + if data and all(isinstance(e, tuple) for e in data): + # we must be all tuples, otherwise don't construct + # 10697 from .multi import MultiIndex return MultiIndex.from_tuples( data, names=name or kwargs.get('names')) @@ -1384,7 +1398,8 @@ def set_names(self, names, level=None, inplace=False): names=[u'baz', u'bar']) """ - if level is not None and self.nlevels == 1: + from .multi import MultiIndex + if level is not None and not isinstance(self, MultiIndex): raise ValueError('Level must be None for non-MultiIndex') if level is not None and not is_list_like(level) and is_list_like( @@ -3346,6 +3361,8 @@ def _filter_indexer_tolerance(self, target, indexer, tolerance): @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs) def get_indexer_non_unique(self, target): target = _ensure_index(target) + if is_categorical(target): + target = 
target.astype(target.dtype.categories.dtype) pself, ptarget = self._maybe_promote(target) if pself is not self or ptarget is not target: return pself.get_indexer_non_unique(ptarget) @@ -4375,7 +4392,7 @@ def drop(self, labels, errors='raise'): Raises ------ KeyError - If none of the labels are found in the selected axis + If not all of the labels are found in the selected axis """ arr_dtype = 'object' if self.dtype == 'object' else None labels = com._index_labels_to_array(labels, dtype=arr_dtype) @@ -4384,7 +4401,7 @@ def drop(self, labels, errors='raise'): if mask.any(): if errors != 'ignore': raise KeyError( - 'labels %s not contained in axis' % labels[mask]) + '{} not found in axis'.format(labels[mask])) indexer = indexer[~mask] return self.delete(indexer) diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 3ffef5804acf7..587090fa72def 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -378,15 +378,15 @@ def _engine(self): # introspection @cache_readonly def is_unique(self): - return not self.duplicated().any() + return self._engine.is_unique @property def is_monotonic_increasing(self): - return Index(self.codes).is_monotonic_increasing + return self._engine.is_monotonic_increasing @property def is_monotonic_decreasing(self): - return Index(self.codes).is_monotonic_decreasing + return self._engine.is_monotonic_decreasing @Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs) def unique(self, level=None): @@ -598,7 +598,12 @@ def get_indexer_non_unique(self, target): target = ibase._ensure_index(target) if isinstance(target, CategoricalIndex): - target = target.categories + # Indexing on codes is more efficient if categories are the same: + if target.categories is self.categories: + target = target.codes + indexer, missing = self._engine.get_indexer_non_unique(target) + return _ensure_platform_int(indexer), missing + target = target.values codes = self.categories.get_indexer(target) 
indexer, missing = self._engine.get_indexer_non_unique(codes) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 83950f1d71633..0ddf33cdcae73 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -2032,7 +2032,16 @@ def time(self): """ Returns numpy array of datetime.time. The time part of the Timestamps. """ - return libts.ints_to_pydatetime(self.asi8, self.tz, box="time") + + # If the Timestamps have a timezone that is not UTC, + # convert them into their i8 representation while + # keeping their timezone and not using UTC + if (self.tz is not None and self.tz is not utc): + timestamps = self._local_timestamps() + else: + timestamps = self.asi8 + + return libts.ints_to_pydatetime(timestamps, box="time") @property def date(self): @@ -2040,7 +2049,16 @@ def date(self): Returns numpy array of python datetime.date objects (namely, the date part of Timestamps without timezone information). """ - return libts.ints_to_pydatetime(self.normalize().asi8, box="date") + + # If the Timestamps have a timezone that is not UTC, + # convert them into their i8 representation while + # keeping their timezone and not using UTC + if (self.tz is not None and self.tz is not utc): + timestamps = self._local_timestamps() + else: + timestamps = self.asi8 + + return libts.ints_to_pydatetime(timestamps, box="date") def normalize(self): """ diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 408a8cc435b63..23a655b9a51ee 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -112,6 +112,10 @@ def maybe_convert_platform_interval(values): ------- array """ + if is_categorical_dtype(values): + # GH 21243/21253 + values = np.array(values) + if isinstance(values, (list, tuple)) and len(values) == 0: # GH 19016 # empty lists/tuples get object dtype by default, but this is not @@ -156,7 +160,7 @@ class IntervalIndex(IntervalMixin, Index): dtype : dtype or None, 
default None If None, dtype will be inferred - ..versionadded:: 0.23.0 + .. versionadded:: 0.23.0 Attributes ---------- @@ -434,7 +438,7 @@ def from_breaks(cls, breaks, closed='right', name=None, copy=False, dtype : dtype or None, default None If None, dtype will be inferred - ..versionadded:: 0.23.0 + .. versionadded:: 0.23.0 Examples -------- @@ -564,7 +568,7 @@ def from_intervals(cls, data, closed=None, name=None, copy=False, dtype : dtype or None, default None If None, dtype will be inferred - ..versionadded:: 0.23.0 + .. versionadded:: 0.23.0 Examples -------- @@ -615,7 +619,7 @@ def from_tuples(cls, data, closed='right', name=None, copy=False, dtype : dtype or None, default None If None, dtype will be inferred - ..versionadded:: 0.23.0 + .. versionadded:: 0.23.0 Examples -------- @@ -667,7 +671,7 @@ def to_tuples(self, na_tuple=True): Returns NA as a tuple if True, ``(nan, nan)``, or just as the NA value itself if False, ``nan``. - ..versionadded:: 0.23.0 + .. versionadded:: 0.23.0 Examples -------- @@ -1572,6 +1576,10 @@ def interval_range(start=None, end=None, periods=None, freq=None, periods += 1 if is_number(endpoint): + # force consistency between start/end/freq (lower end if freq skips it) + if com._all_not_none(start, end, freq): + end -= (end - start) % freq + # compute the period/start/end if unspecified (at most one) if periods is None: periods = int((end - start) // freq) + 1 @@ -1580,10 +1588,6 @@ def interval_range(start=None, end=None, periods=None, freq=None, elif end is None: end = start + (periods - 1) * freq - # force end to be consistent with freq (lower if freq skips end) - if freq is not None: - end -= end % freq - breaks = np.linspace(start, end, periods) if all(is_integer(x) for x in com._not_none(start, end, freq)): # np.linspace always produces float output diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index fbcf06a28c1e5..9a4aa15f4cc25 100644 --- a/pandas/core/indexes/multi.py +++ 
b/pandas/core/indexes/multi.py @@ -11,6 +11,8 @@ from pandas.compat.numpy import function as nv from pandas import compat +from pandas.core.dtypes.dtypes import ( + ExtensionDtype, PandasExtensionDtype) from pandas.core.dtypes.common import ( _ensure_int64, _ensure_platform_int, @@ -672,30 +674,18 @@ def _set_names(self, names, level=None, validate=True): if level is None: level = range(self.nlevels) - used = {} else: level = [self._get_level_number(l) for l in level] - used = {self.levels[l].name: l - for l in set(range(self.nlevels)) - set(level)} # set the name for l, name in zip(level, names): if name is not None: - # GH 20527 # All items in 'names' need to be hashable: if not is_hashable(name): raise TypeError('{}.name must be a hashable type' .format(self.__class__.__name__)) - - if name in used: - raise ValueError( - 'Duplicated level name: "{}", assigned to ' - 'level {}, is already used for level ' - '{}.'.format(name, l, used[name])) - self.levels[l].rename(name, inplace=True) - used[name] = l names = property(fset=_set_names, fget=_get_names, doc="Names of levels in MultiIndex") @@ -820,20 +810,16 @@ def values(self): return self._tuples values = [] - for lev, lab in zip(self.levels, self.labels): - # Need to box timestamps, etc. - box = hasattr(lev, '_box_values') - # Try to minimize boxing. 
- if box and len(lev) > len(lab): - taken = lev._box_values(algos.take_1d(lev._ndarray_values, - lab)) - elif box: - taken = algos.take_1d(lev._box_values(lev._ndarray_values), - lab, - fill_value=lev._na_value) - else: - taken = algos.take_1d(np.asarray(lev._values), lab) - values.append(taken) + + for i in range(self.nlevels): + vals = self._get_level_values(i) + if is_categorical_dtype(vals): + vals = vals.get_values() + if (isinstance(vals.dtype, (PandasExtensionDtype, ExtensionDtype)) + or hasattr(vals, '_box_values')): + vals = vals.astype(object) + vals = np.array(vals, copy=False) + values.append(vals) self._tuples = lib.fast_zip(values) return self._tuples @@ -852,14 +838,6 @@ def _has_complex_internals(self): # to disable groupby tricks return True - @cache_readonly - def is_monotonic(self): - """ - return if the index is monotonic increasing (only equal or - increasing) values. - """ - return self.is_monotonic_increasing - @cache_readonly def is_monotonic_increasing(self): """ @@ -887,10 +865,6 @@ def is_monotonic_decreasing(self): # monotonic decreasing if and only if reverse is monotonic increasing return self[::-1].is_monotonic_increasing - @cache_readonly - def is_unique(self): - return not self.duplicated().any() - @cache_readonly def _have_mixed_levels(self): """ return a boolean list indicated if we have mixed levels """ @@ -1719,7 +1693,6 @@ def drop(self, labels, level=None, errors='raise'): if errors != 'ignore': raise ValueError('labels %s not contained in axis' % labels[mask]) - indexer = indexer[~mask] except Exception: pass @@ -2948,6 +2921,13 @@ def isin(self, values, level=None): else: return np.lib.arraysetops.in1d(labs, sought_labels) + def _reference_duplicate_name(self, name): + """ + Returns True if the name refered to in self.names is duplicated. + """ + # count the times name equals an element in self.names. 
+ return sum(name == n for n in self.names) > 1 + MultiIndex._add_numeric_methods_disabled() MultiIndex._add_numeric_methods_add_sub_disabled() diff --git a/pandas/core/ops.py b/pandas/core/ops.py index e14f82906cd06..540ebeee438f6 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -5,7 +5,10 @@ """ # necessary to enforce truediv in Python 2.X from __future__ import division +import datetime import operator +import textwrap +import warnings import numpy as np import pandas as pd @@ -1197,8 +1200,35 @@ def wrapper(self, other, axis=None): if is_datetime64_dtype(self) or is_datetime64tz_dtype(self): # Dispatch to DatetimeIndex to ensure identical # Series/Index behavior + if (isinstance(other, datetime.date) and + not isinstance(other, datetime.datetime)): + # https://github.com/pandas-dev/pandas/issues/21152 + # Compatibility for difference between Series comparison w/ + # datetime and date + msg = ( + "Comparing Series of datetimes with 'datetime.date'. " + "Currently, the 'datetime.date' is coerced to a " + "datetime. In the future pandas will not coerce, " + "and {future}. " + "To retain the current behavior, " + "convert the 'datetime.date' to a datetime with " + "'pd.Timestamp'." 
+ ) + + if op in {operator.lt, operator.le, operator.gt, operator.ge}: + future = "a TypeError will be raised" + else: + future = ( + "'the values will not compare equal to the " + "'datetime.date'" + ) + msg = '\n'.join(textwrap.wrap(msg.format(future=future))) + warnings.warn(msg, FutureWarning, stacklevel=2) + other = pd.Timestamp(other) + res_values = dispatch_to_index_op(op, self, other, pd.DatetimeIndex) + return self._constructor(res_values, index=self.index, name=res_name) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 16e64192fdb20..bad0dd79aaedd 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1143,13 +1143,26 @@ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, raise NotImplementedError('Panel.{0} does not implement ' 'numeric_only.'.format(name)) - axis_name = self._get_axis_name(axis) - axis_number = self._get_axis_number(axis_name) + if axis is None and filter_type == 'bool': + # labels = None + # constructor = None + axis_number = None + axis_name = None + else: + # TODO: Make other agg func handle axis=None properly + axis = self._get_axis_number(axis) + # labels = self._get_agg_axis(axis) + # constructor = self._constructor + axis_name = self._get_axis_name(axis) + axis_number = self._get_axis_number(axis_name) + f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds) with np.errstate(all='ignore'): result = f(self.values) + if axis is None and filter_type == 'bool': + return np.bool_(result) axes = self._get_plane_axes(axis_name) if result.ndim == 2 and axis_name != self._info_axis_name: result = result.T diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 0707cc756682e..e6b9f88c52cd7 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -1383,8 +1383,7 @@ def _get_time_delta_bins(self, ax): data=[], freq=self.freq, name=ax.name) return binner, [], labels - start = ax[0] - end = ax[-1] + start, end = ax.min(), ax.max() labels = binner = 
TimedeltaIndex(start=start, end=end, freq=self.freq, diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 4d8897fb7c811..d69d79ca9b098 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -28,6 +28,7 @@ is_int_or_datetime_dtype, is_dtype_equal, is_bool, + is_bool_dtype, is_list_like, is_datetimelike, _ensure_int64, @@ -974,9 +975,14 @@ def _maybe_coerce_merge_keys(self): # Check if we are trying to merge on obviously # incompatible dtypes GH 9780, GH 15800 - elif is_numeric_dtype(lk) and not is_numeric_dtype(rk): + + # boolean values are considered as numeric, but are still allowed + # to be merged on object boolean values + elif ((is_numeric_dtype(lk) and not is_bool_dtype(lk)) + and not is_numeric_dtype(rk)): raise ValueError(msg) - elif not is_numeric_dtype(lk) and is_numeric_dtype(rk): + elif (not is_numeric_dtype(lk) + and (is_numeric_dtype(rk) and not is_bool_dtype(rk))): raise ValueError(msg) elif is_datetimelike(lk) and not is_datetimelike(rk): raise ValueError(msg) diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index e02420323704e..9a2ad5d13d77a 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -1,8 +1,10 @@ # pylint: disable=E1103 -from pandas.core.dtypes.common import is_list_like, is_scalar +from pandas.core.dtypes.common import ( + is_list_like, is_scalar, is_integer_dtype) from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries +from pandas.core.dtypes.cast import maybe_downcast_to_dtype from pandas.core.reshape.concat import concat from pandas.core.series import Series @@ -79,8 +81,22 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', pass values = list(values) - grouped = data.groupby(keys, observed=dropna) + # group by the cartesian product of the grouper + # if we have a categorical + grouped = data.groupby(keys, observed=False) agged = grouped.agg(aggfunc) + if dropna and isinstance(agged, ABCDataFrame) 
and len(agged.columns): + agged = agged.dropna(how='all') + + # gh-21133 + # we want to down cast if + # the original values are ints + # as we grouped with a NaN value + # and then dropped, coercing to floats + for v in [v for v in values if v in data and v in agged]: + if (is_integer_dtype(data[v]) and + not is_integer_dtype(agged[v])): + agged[v] = maybe_downcast_to_dtype(agged[v], data[v].dtype) table = agged if table.index.nlevels > 1: diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 0829aa8f5a509..3d9e84954a63b 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -115,6 +115,12 @@ def __init__(self, values, index, level=-1, value_columns=None, self.index = index.remove_unused_levels() + if isinstance(self.index, MultiIndex): + if index._reference_duplicate_name(level): + msg = ("Ambiguous reference to {level}. The index " + "names are not unique.".format(level=level)) + raise ValueError(msg) + self.level = self.index._get_level_number(level) # when index includes `nan`, need to lift levels/strides by 1 @@ -528,6 +534,12 @@ def factorize(index): N, K = frame.shape + if isinstance(frame.columns, MultiIndex): + if frame.columns._reference_duplicate_name(level): + msg = ("Ambiguous reference to {level}. The column " + "names are not unique.".format(level=level)) + raise ValueError(msg) + # Will also convert negative level numbers and check if out of bounds. level_num = frame.columns._get_level_number(level) @@ -725,7 +737,7 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, ---------- data : array-like, Series, or DataFrame prefix : string, list of strings, or dict of strings, default None - String to append DataFrame column names + String to append DataFrame column names. Pass a list with length equal to the number of columns when calling get_dummies on a DataFrame. Alternatively, `prefix` can be a dictionary mapping column names to prefixes. 
diff --git a/pandas/core/series.py b/pandas/core/series.py index 0e2ae22f35af7..6b005c673c7cd 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -40,6 +40,7 @@ maybe_convert_platform, maybe_cast_to_datetime, maybe_castable, construct_1d_arraylike_from_scalar, + construct_1d_ndarray_preserving_na, construct_1d_object_array_from_listlike) from pandas.core.dtypes.missing import ( isna, @@ -1195,12 +1196,13 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): inplace = validate_bool_kwarg(inplace, 'inplace') if drop: new_index = com._default_index(len(self)) - if level is not None and isinstance(self.index, MultiIndex): + if level is not None: if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] - if len(level) < len(self.index.levels): - new_index = self.index.droplevel(level) + if isinstance(self.index, MultiIndex): + if len(level) < self.index.nlevels: + new_index = self.index.droplevel(level) if inplace: self.index = new_index @@ -2616,7 +2618,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, axis = self._get_axis_number(axis) index = self.index - if level: + if level is not None: new_index, indexer = index.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining) elif isinstance(index, MultiIndex): @@ -3210,7 +3212,8 @@ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, delegate = self._values if isinstance(delegate, np.ndarray): # Validate that 'axis' is consistent with Series's single axis. - self._get_axis_number(axis) + if axis is not None: + self._get_axis_number(axis) if numeric_only: raise NotImplementedError('Series.{0} does not implement ' 'numeric_only.'.format(name)) @@ -3268,7 +3271,7 @@ def rename(self, index=None, **kwargs): copy : boolean, default True Also copy underlying data inplace : boolean, default False - Whether to return a new %(klass)s. 
If True then value of copy is + Whether to return a new Series. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified @@ -4046,7 +4049,8 @@ def _try_cast(arr, take_fast_path): isinstance(subarr, np.ndarray))): subarr = construct_1d_object_array_from_listlike(subarr) elif not is_extension_type(subarr): - subarr = np.array(subarr, dtype=dtype, copy=copy) + subarr = construct_1d_ndarray_preserving_na(subarr, dtype, + copy=copy) except (ValueError, TypeError): if is_categorical_dtype(dtype): # We *do* allow casting to categorical, since we know diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index e550976d1deeb..212f44e55c489 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -52,7 +52,21 @@ def _int64_cut_off(shape): return i return len(shape) - def loop(labels, shape): + def maybe_lift(lab, size): + # promote nan values (assigned -1 label in lab array) + # so that all output values are non-negative + return (lab + 1, size + 1) if (lab == -1).any() else (lab, size) + + labels = map(_ensure_int64, labels) + if not xnull: + labels, shape = map(list, zip(*map(maybe_lift, labels, shape))) + + labels = list(labels) + shape = list(shape) + + # Iteratively process all the labels in chunks sized so less + # than _INT64_MAX unique int ids will be required for each chunk + while True: # how many levels can be done without overflow: nlev = _int64_cut_off(shape) @@ -74,7 +88,7 @@ def loop(labels, shape): out[mask] = -1 if nlev == len(shape): # all levels done! 
- return out + break # compress what has been done so far in order to avoid overflow # to retain lexical ranks, obs_ids should be sorted @@ -83,16 +97,7 @@ def loop(labels, shape): labels = [comp_ids] + labels[nlev:] shape = [len(obs_ids)] + shape[nlev:] - return loop(labels, shape) - - def maybe_lift(lab, size): # pormote nan values - return (lab + 1, size + 1) if (lab == -1).any() else (lab, size) - - labels = map(_ensure_int64, labels) - if not xnull: - labels, shape = map(list, zip(*map(maybe_lift, labels, shape))) - - return loop(list(labels), list(shape)) + return out def get_compressed_ids(labels, sizes): diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py index 5532d7522cd2d..ff58f7d104ff9 100644 --- a/pandas/core/sparse/array.py +++ b/pandas/core/sparse/array.py @@ -290,6 +290,7 @@ def __reduce__(self): """Necessary for making this object picklable""" object_state = list(np.ndarray.__reduce__(self)) subclass_state = self.fill_value, self.sp_index + object_state[2] = self.sp_values.__reduce__()[2] object_state[2] = (object_state[2], subclass_state) return tuple(object_state) @@ -339,6 +340,10 @@ def values(self): output.put(int_index.indices, self) return output + @property + def shape(self): + return (len(self),) + @property def sp_values(self): # caching not an option, leaks memory diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 81d775157cf62..44811781837bc 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -241,7 +241,7 @@ def str_count(arr, pat, flags=0): Escape ``'$'`` to find the literal dollar sign. >>> s = pd.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat']) - >>> s.str.count('\$') + >>> s.str.count('\\$') 0 1 1 0 2 1 @@ -358,7 +358,7 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True): Returning any digit using regular expression. 
- >>> s1.str.contains('\d', regex=True) + >>> s1.str.contains('\\d', regex=True) 0 False 1 False 2 False @@ -2172,9 +2172,9 @@ def cat(self, others=None, sep=None, na_rep=None, join=None): Returns ------- - concat : str if `other is None`, Series/Index of objects if `others is - not None`. In the latter case, the result will remain categorical - if the calling Series/Index is categorical. + concat : str or Series/Index of objects + If `others` is None, `str` is returned, otherwise a `Series/Index` + (same type as caller) of objects is returned. See Also -------- diff --git a/pandas/core/window.py b/pandas/core/window.py index 015e7f7913ed0..9d0f9dc4f75f9 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -602,8 +602,8 @@ def validate(self): if isinstance(window, (list, tuple, np.ndarray)): pass elif is_integer(window): - if window < 0: - raise ValueError("window must be non-negative") + if window <= 0: + raise ValueError("window must be > 0 ") try: import scipy.signal as sig except ImportError: diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index dcc221ce978b3..b3f40b3a2429c 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -1,6 +1,7 @@ """ io on the clipboard """ from pandas import compat, get_option, option_context, DataFrame -from pandas.compat import StringIO, PY2 +from pandas.compat import StringIO, PY2, PY3 +import warnings def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover @@ -32,7 +33,7 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover # try to decode (if needed on PY3) # Strange. 
linux py33 doesn't complain, win py33 does - if compat.PY3: + if PY3: try: text = compat.bytes_to_str( text, encoding=(kwargs.get('encoding') or @@ -55,11 +56,27 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover counts = {x.lstrip().count('\t') for x in lines} if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0: - sep = r'\t' + sep = '\t' + # Edge case where sep is specified to be None, return to default if sep is None and kwargs.get('delim_whitespace') is None: sep = r'\s+' + # Regex separator currently only works with python engine. + # Default to python if separator is multi-character (regex) + if len(sep) > 1 and kwargs.get('engine') is None: + kwargs['engine'] = 'python' + elif len(sep) > 1 and kwargs.get('engine') == 'c': + warnings.warn('read_clipboard with regex separator does not work' + ' properly with c engine') + + # In PY2, the c table reader first encodes text with UTF-8 but Python + # table reader uses the format of the passed string. For consistency, + # encode strings for python engine so that output from python and c + # engines produce consistent results + if kwargs.get('engine') == 'python' and PY2: + text = text.encode('utf-8') + return read_table(StringIO(text), sep=sep, **kwargs) @@ -99,7 +116,7 @@ def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover if excel: try: if sep is None: - sep = r'\t' + sep = '\t' buf = StringIO() # clipboard_set (pyperclip) expects unicode obj.to_csv(buf, sep=sep, encoding='utf-8', **kwargs) @@ -108,8 +125,11 @@ def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover text = text.decode('utf-8') clipboard_set(text) return - except: - pass + except TypeError: + warnings.warn('to_clipboard in excel mode requires a single ' + 'character separator.') + elif sep is not None: + warnings.warn('to_clipboard with excel=False ignores the sep argument') if isinstance(obj, DataFrame): # str(df) has various unhelpful defaults, like truncation diff --git 
a/pandas/io/common.py b/pandas/io/common.py index 0827216975f15..ac9077f2db50e 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -5,7 +5,7 @@ import codecs import mmap from contextlib import contextmanager, closing -from zipfile import ZipFile +import zipfile from pandas.compat import StringIO, BytesIO, string_types, text_type from pandas import compat @@ -428,7 +428,7 @@ def _get_handle(path_or_buf, mode, encoding=None, compression=None, return f, handles -class BytesZipFile(ZipFile, BytesIO): +class BytesZipFile(zipfile.ZipFile, BytesIO): """ Wrapper for standard library class ZipFile and allow the returned file-like handle to accept byte strings via `write` method. @@ -437,14 +437,18 @@ class BytesZipFile(ZipFile, BytesIO): bytes strings into a member of the archive. """ # GH 17778 - def __init__(self, file, mode='r', **kwargs): + def __init__(self, file, mode, compression=zipfile.ZIP_DEFLATED, **kwargs): if mode in ['wb', 'rb']: mode = mode.replace('b', '') - super(BytesZipFile, self).__init__(file, mode, **kwargs) + super(BytesZipFile, self).__init__(file, mode, compression, **kwargs) def write(self, data): super(BytesZipFile, self).writestr(self.filename, data) + @property + def closed(self): + return self.fp is None + class MMapWrapper(BaseIterator): """ diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 29b8d29af0808..60518f596e9af 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -5,7 +5,10 @@ from __future__ import print_function +import warnings + import csv as csvlib +from zipfile import ZipFile import numpy as np from pandas.core.dtypes.missing import notna @@ -127,14 +130,31 @@ def save(self): else: encoding = self.encoding - if hasattr(self.path_or_buf, 'write'): + # GH 21227 internal compression is not used when file-like passed. 
+ if self.compression and hasattr(self.path_or_buf, 'write'): + msg = ("compression has no effect when passing file-like " + "object as input.") + warnings.warn(msg, RuntimeWarning, stacklevel=2) + + # when zip compression is called. + is_zip = isinstance(self.path_or_buf, ZipFile) or ( + not hasattr(self.path_or_buf, 'write') + and self.compression == 'zip') + + if is_zip: + # zipfile doesn't support writing string to archive. uses string + # buffer to receive csv writing and dump into zip compression + # file handle. GH 21241, 21118 + f = StringIO() + close = False + elif hasattr(self.path_or_buf, 'write'): f = self.path_or_buf close = False else: f, handles = _get_handle(self.path_or_buf, self.mode, encoding=encoding, - compression=None) - close = True if self.compression is None else False + compression=self.compression) + close = True try: writer_kwargs = dict(lineterminator=self.line_terminator, @@ -151,18 +171,21 @@ def save(self): self._save() finally: - # GH 17778 handles compression for byte strings. - if not close and self.compression: - f.close() - with open(self.path_or_buf, 'r') as f: - data = f.read() - f, handles = _get_handle(self.path_or_buf, self.mode, - encoding=encoding, - compression=self.compression) - f.write(data) - close = True + if is_zip: + # GH 17778 handles zip compression separately. 
+ buf = f.getvalue() + if hasattr(self.path_or_buf, 'write'): + self.path_or_buf.write(buf) + else: + f, handles = _get_handle(self.path_or_buf, self.mode, + encoding=encoding, + compression=self.compression) + f.write(buf) + close = True if close: f.close() + for _fh in handles: + _fh.close() def _save_header(self): diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 12201f62946ac..c46f4b5ad9c18 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -636,10 +636,14 @@ def to_string(self): mid = int(round(n_cols / 2.)) mid_ix = col_lens.index[mid] col_len = col_lens[mid_ix] - adj_dif -= (col_len + 1) # adjoin adds one + # adjoin adds one + adj_dif -= (col_len + 1) col_lens = col_lens.drop(mid_ix) n_cols = len(col_lens) - max_cols_adj = n_cols - self.index # subtract index column + # subtract index column + max_cols_adj = n_cols - self.index + # GH-21180. Ensure that we print at least two. + max_cols_adj = max(max_cols_adj, 2) self.max_cols_adj = max_cols_adj # Call again _chk_truncate to cut frame appropriately @@ -778,7 +782,7 @@ def space_format(x, y): str_columns = list(zip(*[[space_format(x, y) for y in x] for x in fmt_columns])) - if self.sparsify: + if self.sparsify and len(str_columns): str_columns = _sparsify(str_columns) str_columns = [list(x) for x in zip(*str_columns)] diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py index 549204abd3caf..2004a24c2ec5a 100644 --- a/pandas/io/json/normalize.py +++ b/pandas/io/json/normalize.py @@ -80,8 +80,6 @@ def nested_to_record(ds, prefix="", sep=".", level=0): if level != 0: # so we skip copying for top level, common case v = new_d.pop(k) new_d[newkey] = v - if v is None: # pop the key if the value is None - new_d.pop(k) continue else: v = new_d.pop(k) @@ -172,6 +170,11 @@ def json_normalize(data, record_path=None, meta=None, 3 Summit 1234 John Kasich Ohio OH 4 Cuyahoga 1337 John Kasich Ohio OH + >>> data = {'A': [1, 2]} + >>> json_normalize(data, 
'A', record_prefix='Prefix.') + Prefix.0 + 0 1 + 1 2 """ def _pull_field(js, spec): result = js @@ -261,7 +264,8 @@ def _recursive_extract(data, path, seen_meta, level=0): result = DataFrame(records) if record_prefix is not None: - result.rename(columns=lambda x: record_prefix + x, inplace=True) + result = result.rename( + columns=lambda x: "{p}{c}".format(p=record_prefix, c=x)) # Data types, a problem for k, v in compat.iteritems(meta_vals): diff --git a/pandas/io/json/table_schema.py b/pandas/io/json/table_schema.py index 01f7db7d68664..5cea64388bdd7 100644 --- a/pandas/io/json/table_schema.py +++ b/pandas/io/json/table_schema.py @@ -296,7 +296,7 @@ def parse_table_schema(json, precise_float): """ table = loads(json, precise_float=precise_float) col_order = [field['name'] for field in table['schema']['fields']] - df = DataFrame(table['data'])[col_order] + df = DataFrame(table['data'], columns=col_order)[col_order] dtypes = {field['name']: convert_json_field_to_pandas_type(field) for field in table['schema']['fields']} diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 2c8f98732c92f..65df2bffb4abf 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -3209,12 +3209,22 @@ def _get_empty_meta(columns, index_col, index_names, dtype=None): col = columns[k] if is_integer(k) else k dtype[col] = v - if index_col is None or index_col is False: + # Even though we have no data, the "index" of the empty DataFrame + # could for example still be an empty MultiIndex. Thus, we need to + # check whether we have any index columns specified, via either: + # + # 1) index_col (column indices) + # 2) index_names (column names) + # + # Both must be non-null to ensure a successful construction. Otherwise, + # we have to create a generic emtpy Index. 
+ if (index_col is None or index_col is False) or index_names is None: index = Index([]) else: data = [Series([], dtype=dtype[name]) for name in index_names] index = _ensure_index_from_sequences(data, names=index_names) index_col.sort() + for i, n in enumerate(index_col): columns.pop(n - i) diff --git a/pandas/io/sql.py b/pandas/io/sql.py index ccb8d2d99d734..a582d32741ae9 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -572,29 +572,8 @@ def create(self): else: self._execute_create() - def insert_statement(self, data, conn): - """ - Generate tuple of SQLAlchemy insert statement and any arguments - to be executed by connection (via `_execute_insert`). - - Parameters - ---------- - conn : SQLAlchemy connectable(engine/connection) - Connection to recieve the data - data : list of dict - The data to be inserted - - Returns - ------- - SQLAlchemy statement - insert statement - *, optional - Additional parameters to be passed when executing insert statement - """ - dialect = getattr(conn, 'dialect', None) - if dialect and getattr(dialect, 'supports_multivalues_insert', False): - return self.table.insert(data), - return self.table.insert(), data + def insert_statement(self): + return self.table.insert() def insert_data(self): if self.index is not None: @@ -633,9 +612,8 @@ def insert_data(self): return column_names, data_list def _execute_insert(self, conn, keys, data_iter): - """Insert data into this table with database connection""" data = [{k: v for k, v in zip(keys, row)} for row in data_iter] - conn.execute(*self.insert_statement(data, conn)) + conn.execute(self.insert_statement(), data) def insert(self, chunksize=None): keys, data_list = self.insert_data() diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 8f91c7a497e2d..2797924985c70 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1758,11 +1758,25 @@ def value_labels(self): return self.value_label_dict -def _open_file_binary_write(fname, encoding): +def _open_file_binary_write(fname): + 
""" + Open a binary file or no-op if file-like + + Parameters + ---------- + fname : string path, path object or buffer + + Returns + ------- + file : file-like object + File object supporting write + own : bool + True if the file was created, otherwise False + """ if hasattr(fname, 'write'): # if 'b' not in fname.mode: - return fname - return open(fname, "wb") + return fname, False + return open(fname, "wb"), True def _set_endianness(endianness): @@ -1899,7 +1913,9 @@ class StataWriter(StataParser): ---------- fname : path (string), buffer or path object string, path object (pathlib.Path or py._path.local.LocalPath) or - object implementing a binary write() functions. + object implementing a binary write() functions. If using a buffer + then the buffer will not be automatically closed after the file + is written. .. versionadded:: 0.23.0 support for pathlib, py.path. @@ -1970,6 +1986,7 @@ def __init__(self, fname, data, convert_dates=None, write_index=True, self._time_stamp = time_stamp self._data_label = data_label self._variable_labels = variable_labels + self._own_file = True # attach nobs, nvars, data, varlist, typlist self._prepare_pandas(data) @@ -2183,9 +2200,7 @@ def _prepare_pandas(self, data): self.fmtlist[key] = self._convert_dates[key] def write_file(self): - self._file = _open_file_binary_write( - self._fname, self._encoding or self._default_encoding - ) + self._file, self._own_file = _open_file_binary_write(self._fname) try: self._write_header(time_stamp=self._time_stamp, data_label=self._data_label) @@ -2205,6 +2220,23 @@ def write_file(self): self._write_file_close_tag() self._write_map() finally: + self._close() + + def _close(self): + """ + Close the file if it was created by the writer. + + If a buffer or file-like object was passed in, for example a GzipFile, + then leave this file open for the caller to close. 
In either case, + attempt to flush the file contents to ensure they are written to disk + (if supported) + """ + # Some file-like objects might not support flush + try: + self._file.flush() + except AttributeError: + pass + if self._own_file: self._file.close() def _write_map(self): @@ -2374,7 +2406,7 @@ def _prepare_data(self): def _write_data(self): data = self.data - data.tofile(self._file) + self._file.write(data.tobytes()) def _null_terminate(self, s, as_string=False): null_byte = '\x00' @@ -2641,7 +2673,9 @@ class StataWriter117(StataWriter): ---------- fname : path (string), buffer or path object string, path object (pathlib.Path or py._path.local.LocalPath) or - object implementing a binary write() functions. + object implementing a binary write() functions. If using a buffer + then the buffer will not be automatically closed after the file + is written. data : DataFrame Input to save convert_dates : dict @@ -2879,7 +2913,7 @@ def _write_data(self): self._update_map('data') data = self.data self._file.write(b'<data>') - data.tofile(self._file) + self._file.write(data.tobytes()) self._file.write(b'</data>') def _write_strls(self): diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 87b7d13251f28..d1a2121597dd6 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -811,7 +811,7 @@ class PlanePlot(MPLPlot): def __init__(self, data, x, y, **kwargs): MPLPlot.__init__(self, data, **kwargs) if x is None or y is None: - raise ValueError(self._kind + ' requires and x and y column') + raise ValueError(self._kind + ' requires an x and y column') if is_integer(x) and not self.data.columns.holds_integer(): x = self.data.columns[x] if is_integer(y) and not self.data.columns.holds_integer(): diff --git a/pandas/tests/categorical/test_indexing.py b/pandas/tests/categorical/test_indexing.py index 9c27b1101e5ca..cf7b5cfa55882 100644 --- a/pandas/tests/categorical/test_indexing.py +++ b/pandas/tests/categorical/test_indexing.py @@ -5,7 
+5,7 @@ import numpy as np import pandas.util.testing as tm -from pandas import Categorical, Index, PeriodIndex +from pandas import Categorical, Index, CategoricalIndex, PeriodIndex from pandas.tests.categorical.common import TestCategorical @@ -103,3 +103,21 @@ def f(): s.categories = [1, 2] pytest.raises(ValueError, f) + + # Combinations of sorted/unique: + @pytest.mark.parametrize("idx_values", [[1, 2, 3, 4], [1, 3, 2, 4], + [1, 3, 3, 4], [1, 2, 2, 4]]) + # Combinations of missing/unique + @pytest.mark.parametrize("key_values", [[1, 2], [1, 5], [1, 1], [5, 5]]) + @pytest.mark.parametrize("key_class", [Categorical, CategoricalIndex]) + def test_get_indexer_non_unique(self, idx_values, key_values, key_class): + # GH 21448 + key = key_class(key_values, categories=range(1, 5)) + # Test for flat index and CategoricalIndex with same/different cats: + for dtype in None, 'category', key.dtype: + idx = Index(idx_values, dtype=dtype) + expected, exp_miss = idx.get_indexer_non_unique(key_values) + result, res_miss = idx.get_indexer_non_unique(key) + + tm.assert_numpy_array_equal(expected, result) + tm.assert_numpy_array_equal(exp_miss, res_miss) diff --git a/pandas/tests/categorical/test_missing.py b/pandas/tests/categorical/test_missing.py index 5133c97d8b590..c78f02245a5b4 100644 --- a/pandas/tests/categorical/test_missing.py +++ b/pandas/tests/categorical/test_missing.py @@ -1,4 +1,6 @@ # -*- coding: utf-8 -*- +import collections + import numpy as np import pytest @@ -68,3 +70,16 @@ def test_fillna_raises(self, fillna_kwargs, msg): with tm.assert_raises_regex(ValueError, msg): cat.fillna(**fillna_kwargs) + + @pytest.mark.parametrize("named", [True, False]) + def test_fillna_iterable_category(self, named): + # https://github.com/pandas-dev/pandas/issues/21097 + if named: + Point = collections.namedtuple("Point", "x y") + else: + Point = lambda *args: args # tuple + cat = Categorical([Point(0, 0), Point(0, 1), None]) + result = cat.fillna(Point(0, 0)) + expected = 
Categorical([Point(0, 0), Point(0, 1), Point(0, 0)]) + + tm.assert_categorical_equal(result, expected) diff --git a/pandas/tests/dtypes/test_cast.py b/pandas/tests/dtypes/test_cast.py index 20cd8b43478d2..4a19682e2c558 100644 --- a/pandas/tests/dtypes/test_cast.py +++ b/pandas/tests/dtypes/test_cast.py @@ -23,6 +23,7 @@ maybe_convert_scalar, find_common_type, construct_1d_object_array_from_listlike, + construct_1d_ndarray_preserving_na, construct_1d_arraylike_from_scalar) from pandas.core.dtypes.dtypes import ( CategoricalDtype, @@ -440,3 +441,15 @@ def test_cast_1d_arraylike_from_scalar_categorical(self): tm.assert_categorical_equal(result, expected, check_category_order=True, check_dtype=True) + + +@pytest.mark.parametrize('values, dtype, expected', [ + ([1, 2, 3], None, np.array([1, 2, 3])), + (np.array([1, 2, 3]), None, np.array([1, 2, 3])), + (['1', '2', None], None, np.array(['1', '2', None])), + (['1', '2', None], np.dtype('str'), np.array(['1', '2', None])), + ([1, 2, None], np.dtype('str'), np.array(['1', '2', None])), +]) +def test_construct_1d_ndarray_preserving_na(values, dtype, expected): + result = construct_1d_ndarray_preserving_na(values, dtype=dtype) + tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py index 32cf29818e069..af26d83df3fe2 100644 --- a/pandas/tests/extension/base/missing.py +++ b/pandas/tests/extension/base/missing.py @@ -18,6 +18,11 @@ def test_isna(self, data_missing): expected = pd.Series(expected) self.assert_series_equal(result, expected) + # GH 21189 + result = pd.Series(data_missing).drop([0, 1]).isna() + expected = pd.Series([], dtype=bool) + self.assert_series_equal(result, expected) + def test_dropna_series(self, data_missing): ser = pd.Series(data_missing) result = ser.dropna() diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py index e9431bd0c233c..90f0181beab0d 100644 --- 
a/pandas/tests/extension/decimal/array.py +++ b/pandas/tests/extension/decimal/array.py @@ -90,7 +90,7 @@ def nbytes(self): return 0 def isna(self): - return np.array([x.is_nan() for x in self._data]) + return np.array([x.is_nan() for x in self._data], dtype=bool) @property def _na_value(self): diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index 88bb66f38b35c..10be7836cb8d7 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -108,7 +108,8 @@ def nbytes(self): return sys.getsizeof(self.data) def isna(self): - return np.array([x == self.dtype.na_value for x in self.data]) + return np.array([x == self.dtype.na_value for x in self.data], + dtype=bool) def take(self, indexer, allow_fill=False, fill_value=None): # re-implement here, since NumPy has trouble setting diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 164d6746edec0..21961906c39bb 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -130,19 +130,27 @@ def test_set_index2(self): result = df.set_index(df.C) assert result.index.name == 'C' - @pytest.mark.parametrize('level', ['a', pd.Series(range(3), name='a')]) + @pytest.mark.parametrize( + 'level', ['a', pd.Series(range(0, 8, 2), name='a')]) def test_set_index_duplicate_names(self, level): - # GH18872 + # GH18872 - GH19029 df = pd.DataFrame(np.arange(8).reshape(4, 2), columns=['a', 'b']) # Pass an existing level name: df.index.name = 'a' - pytest.raises(ValueError, df.set_index, level, append=True) - pytest.raises(ValueError, df.set_index, [level], append=True) - - # Pass twice the same level name: - df.index.name = 'c' - pytest.raises(ValueError, df.set_index, [level, level]) + expected = pd.MultiIndex.from_tuples([(0, 0), (1, 2), (2, 4), (3, 6)], + names=['a', 'a']) + result = df.set_index(level, append=True) + tm.assert_index_equal(result.index, expected) + result = 
df.set_index([level], append=True) + tm.assert_index_equal(result.index, expected) + + # Pass twice the same level name (only works with passing actual data) + if isinstance(level, pd.Series): + result = df.set_index([level, level]) + expected = pd.MultiIndex.from_tuples( + [(0, 0), (2, 2), (4, 4), (6, 6)], names=['a', 'a']) + tm.assert_index_equal(result.index, expected) def test_set_index_nonuniq(self): df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'], @@ -617,6 +625,19 @@ def test_reorder_levels(self): index=e_idx) assert_frame_equal(result, expected) + result = df.reorder_levels([0, 0, 0]) + e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']], + labels=[[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]], + names=['L0', 'L0', 'L0']) + expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, + index=e_idx) + assert_frame_equal(result, expected) + + result = df.reorder_levels(['L0', 'L0', 'L0']) + assert_frame_equal(result, expected) + def test_reset_index(self): stacked = self.frame.stack()[::2] stacked = DataFrame({'foo': stacked, 'bar': stacked}) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index d1a4a5f615b86..415ae982673ee 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -12,10 +12,10 @@ from numpy.random import randn import numpy as np -from pandas.compat import lrange, product, PY35 +from pandas.compat import lrange, PY35 from pandas import (compat, isna, notna, DataFrame, Series, MultiIndex, date_range, Timestamp, Categorical, - _np_version_under1p12, _np_version_under1p15) + _np_version_under1p12) import pandas as pd import pandas.core.nanops as nanops import pandas.core.algorithms as algorithms @@ -1139,11 +1139,35 @@ def test_any_all(self): self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True) self._check_bool_op('all', np.all, has_skipna=True, has_bool_only=True) - df = DataFrame(randn(10, 4)) > 0 - df.any(1) - 
df.all(1) - df.any(1, bool_only=True) - df.all(1, bool_only=True) + def test_any_all_extra(self): + df = DataFrame({ + 'A': [True, False, False], + 'B': [True, True, False], + 'C': [True, True, True], + }, index=['a', 'b', 'c']) + result = df[['A', 'B']].any(1) + expected = Series([True, True, False], index=['a', 'b', 'c']) + tm.assert_series_equal(result, expected) + + result = df[['A', 'B']].any(1, bool_only=True) + tm.assert_series_equal(result, expected) + + result = df.all(1) + expected = Series([True, False, False], index=['a', 'b', 'c']) + tm.assert_series_equal(result, expected) + + result = df.all(1, bool_only=True) + tm.assert_series_equal(result, expected) + + # Axis is None + result = df.all(axis=None).item() + assert result is False + + result = df.any(axis=None).item() + assert result is True + + result = df[['C']].all(axis=None).item() + assert result is True # skip pathological failure cases # class CantNonzero(object): @@ -1165,6 +1189,86 @@ def test_any_all(self): # df.any(1, bool_only=True) # df.all(1, bool_only=True) + @pytest.mark.parametrize('func, data, expected', [ + (np.any, {}, False), + (np.all, {}, True), + (np.any, {'A': []}, False), + (np.all, {'A': []}, True), + (np.any, {'A': [False, False]}, False), + (np.all, {'A': [False, False]}, False), + (np.any, {'A': [True, False]}, True), + (np.all, {'A': [True, False]}, False), + (np.any, {'A': [True, True]}, True), + (np.all, {'A': [True, True]}, True), + + (np.any, {'A': [False], 'B': [False]}, False), + (np.all, {'A': [False], 'B': [False]}, False), + + (np.any, {'A': [False, False], 'B': [False, True]}, True), + (np.all, {'A': [False, False], 'B': [False, True]}, False), + + # other types + (np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False), + (np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True), + (np.all, {'A': pd.Series([0, 1], dtype=int)}, False), + (np.any, {'A': pd.Series([0, 1], dtype=int)}, True), + pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, 
False, + marks=[td.skip_if_np_lt_115]), + pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True, + marks=[td.skip_if_np_lt_115]), + pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True, + marks=[td.skip_if_np_lt_115]), + pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True, + marks=[td.skip_if_np_lt_115]), + pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False, + marks=[td.skip_if_np_lt_115]), + pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True, + marks=[td.skip_if_np_lt_115]), + pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True, + marks=[td.skip_if_np_lt_115]), + pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True, + marks=[td.skip_if_np_lt_115]), + (np.all, {'A': pd.Series([0, 1], dtype='category')}, False), + (np.any, {'A': pd.Series([0, 1], dtype='category')}, True), + (np.all, {'A': pd.Series([1, 2], dtype='category')}, True), + (np.any, {'A': pd.Series([1, 2], dtype='category')}, True), + + # # Mix + # GH-21484 + # (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'), + # 'B': pd.Series([10, 20], dtype='m8[ns]')}, True), + ]) + def test_any_all_np_func(self, func, data, expected): + # https://github.com/pandas-dev/pandas/issues/19976 + data = DataFrame(data) + result = func(data) + assert isinstance(result, np.bool_) + assert result.item() is expected + + # method version + result = getattr(DataFrame(data), func.__name__)(axis=None) + assert isinstance(result, np.bool_) + assert result.item() is expected + + def test_any_all_object(self): + # https://github.com/pandas-dev/pandas/issues/19976 + result = np.all(DataFrame(columns=['a', 'b'])).item() + assert result is True + + result = np.any(DataFrame(columns=['a', 'b'])).item() + assert result is False + + @pytest.mark.parametrize('method', ['any', 'all']) + def test_any_all_level_axis_none_raises(self, method): + df = DataFrame( + {"A": 1}, + index=MultiIndex.from_product([['A', 'B'], ['a', 'b']], + 
names=['out', 'in']) + ) + xpr = "Must specify 'axis' when aggregating by level." + with tm.assert_raises_regex(ValueError, xpr): + getattr(df, method)(axis=None, level='out') + def _check_bool_op(self, name, alternative, frame=None, has_skipna=True, has_bool_only=False): if frame is None: @@ -1507,6 +1611,23 @@ def test_duplicated_with_misspelled_column_name(self, subset): with pytest.raises(KeyError): df.drop_duplicates(subset) + @pytest.mark.slow + def test_duplicated_do_not_fail_on_wide_dataframes(self): + # gh-21524 + # Given the wide dataframe with a lot of columns + # with different (important!) values + data = {'col_{0:02d}'.format(i): np.random.randint(0, 1000, 30000) + for i in range(100)} + df = pd.DataFrame(data).T + result = df.duplicated() + + # Then duplicates produce the bool pd.Series as a result + # and don't fail during calculation. + # Actual values doesn't matter here, though usually + # it's all False in this case + assert isinstance(result, pd.Series) + assert result.dtype == np.bool + def test_drop_duplicates_with_duplicate_column_names(self): # GH17836 df = DataFrame([ @@ -2054,9 +2175,6 @@ def test_clip_against_list_like(self, inplace, lower, axis, res): result = original tm.assert_frame_equal(result, expected, check_exact=True) - @pytest.mark.xfail( - not _np_version_under1p15, - reason="failing under numpy-dev gh-19976") @pytest.mark.parametrize("axis", [0, 1, None]) def test_clip_against_frame(self, axis): df = DataFrame(np.random.randn(1000, 2)) @@ -2077,13 +2195,23 @@ def test_clip_with_na_args(self): """Should process np.nan argument as None """ # GH # 17276 tm.assert_frame_equal(self.frame.clip(np.nan), self.frame) - tm.assert_frame_equal(self.frame.clip(upper=[1, 2, np.nan]), - self.frame) - tm.assert_frame_equal(self.frame.clip(lower=[1, np.nan, 3]), - self.frame) tm.assert_frame_equal(self.frame.clip(upper=np.nan, lower=np.nan), self.frame) + # GH #19992 + df = DataFrame({'col_0': [1, 2, 3], 'col_1': [4, 5, 6], + 'col_2': [7, 8, 
9]}) + + result = df.clip(lower=[4, 5, np.nan], axis=0) + expected = DataFrame({'col_0': [4, 5, np.nan], 'col_1': [4, 5, np.nan], + 'col_2': [7, 8, np.nan]}) + tm.assert_frame_equal(result, expected) + + result = df.clip(lower=[4, 5, np.nan], axis=1) + expected = DataFrame({'col_0': [4, 4, 4], 'col_1': [5, 5, 6], + 'col_2': [np.nan, np.nan, np.nan]}) + tm.assert_frame_equal(result, expected) + # Matrix-like def test_dot(self): a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'], @@ -2240,54 +2368,49 @@ class TestNLargestNSmallest(object): # ---------------------------------------------------------------------- # Top / bottom - @pytest.mark.parametrize( - 'method, n, order', - product(['nsmallest', 'nlargest'], range(1, 11), - [['a'], - ['c'], - ['a', 'b'], - ['a', 'c'], - ['b', 'a'], - ['b', 'c'], - ['a', 'b', 'c'], - ['c', 'a', 'b'], - ['c', 'b', 'a'], - ['b', 'c', 'a'], - ['b', 'a', 'c'], - - # dups! - ['b', 'c', 'c'], - - ])) - def test_n(self, df_strings, method, n, order): + @pytest.mark.parametrize('order', [ + ['a'], + ['c'], + ['a', 'b'], + ['a', 'c'], + ['b', 'a'], + ['b', 'c'], + ['a', 'b', 'c'], + ['c', 'a', 'b'], + ['c', 'b', 'a'], + ['b', 'c', 'a'], + ['b', 'a', 'c'], + + # dups! 
+ ['b', 'c', 'c']]) + @pytest.mark.parametrize('n', range(1, 11)) + def test_n(self, df_strings, nselect_method, n, order): # GH10393 df = df_strings if 'b' in order: error_msg = self.dtype_error_msg_template.format( - column='b', method=method, dtype='object') + column='b', method=nselect_method, dtype='object') with tm.assert_raises_regex(TypeError, error_msg): - getattr(df, method)(n, order) + getattr(df, nselect_method)(n, order) else: - ascending = method == 'nsmallest' - result = getattr(df, method)(n, order) + ascending = nselect_method == 'nsmallest' + result = getattr(df, nselect_method)(n, order) expected = df.sort_values(order, ascending=ascending).head(n) tm.assert_frame_equal(result, expected) - @pytest.mark.parametrize( - 'method, columns', - product(['nsmallest', 'nlargest'], - product(['group'], ['category_string', 'string']) - )) - def test_n_error(self, df_main_dtypes, method, columns): + @pytest.mark.parametrize('columns', [ + ('group', 'category_string'), ('group', 'string')]) + def test_n_error(self, df_main_dtypes, nselect_method, columns): df = df_main_dtypes + col = columns[1] error_msg = self.dtype_error_msg_template.format( - column=columns[1], method=method, dtype=df[columns[1]].dtype) + column=col, method=nselect_method, dtype=df[col].dtype) # escape some characters that may be in the repr error_msg = (error_msg.replace('(', '\\(').replace(")", "\\)") .replace("[", "\\[").replace("]", "\\]")) with tm.assert_raises_regex(TypeError, error_msg): - getattr(df, method)(2, columns) + getattr(df, nselect_method)(2, columns) def test_n_all_dtypes(self, df_main_dtypes): df = df_main_dtypes @@ -2308,15 +2431,14 @@ def test_n_identical_values(self): expected = pd.DataFrame({'a': [1] * 3, 'b': [1, 2, 3]}) tm.assert_frame_equal(result, expected) - @pytest.mark.parametrize( - 'n, order', - product([1, 2, 3, 4, 5], - [['a', 'b', 'c'], - ['c', 'b', 'a'], - ['a'], - ['b'], - ['a', 'b'], - ['c', 'b']])) + @pytest.mark.parametrize('order', [ + ['a', 'b', 
'c'], + ['c', 'b', 'a'], + ['a'], + ['b'], + ['a', 'b'], + ['c', 'b']]) + @pytest.mark.parametrize('n', range(1, 6)) def test_n_duplicate_index(self, df_duplicates, n, order): # GH 13412 diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index ac46f02d00773..dfb2961befe35 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -554,6 +554,14 @@ def test_apply_non_numpy_dtype(self): result = df.apply(lambda x: x) assert_frame_equal(result, df) + def test_apply_dup_names_multi_agg(self): + # GH 21063 + df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'a']) + expected = pd.DataFrame([[0, 1]], columns=['a', 'a'], index=['min']) + result = df.agg(['min']) + + tm.assert_frame_equal(result, expected) + class TestInferOutputShape(object): # the user has supplied an opaque UDF where diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index 28e82f7585850..004fb4eb0c128 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -10,7 +10,7 @@ import numpy as np from pandas.compat import lrange, lzip, u -from pandas import (compat, DataFrame, Series, Index, MultiIndex, +from pandas import (compat, DataFrame, Series, Index, MultiIndex, Categorical, date_range, isna) import pandas as pd @@ -1129,6 +1129,19 @@ def test_reindex_multi(self): assert_frame_equal(result, expected) + def test_reindex_multi_categorical_time(self): + # https://github.com/pandas-dev/pandas/issues/21390 + midx = pd.MultiIndex.from_product( + [Categorical(['a', 'b', 'c']), + Categorical(date_range("2012-01-01", periods=3, freq='H'))]) + df = pd.DataFrame({'a': range(len(midx))}, index=midx) + df2 = df.iloc[[0, 1, 2, 3, 4, 5, 6, 8]] + + result = df2.reindex(midx) + expected = pd.DataFrame( + {'a': [0, 1, 2, 3, 4, 5, 6, np.nan, 8]}, index=midx) + assert_frame_equal(result, expected) + data = [[1, 2, 3], [1, 2, 3]] 
@pytest.mark.parametrize('actual', [ @@ -1151,3 +1164,18 @@ def test_raise_on_drop_duplicate_index(self, actual): expected_no_err = actual.T.drop('c', axis=1, level=level, errors='ignore') assert_frame_equal(expected_no_err.T, actual) + + @pytest.mark.parametrize('index', [[1, 2, 3], [1, 1, 2]]) + @pytest.mark.parametrize('drop_labels', [[], [1], [2]]) + def test_drop_empty_list(self, index, drop_labels): + # GH 21494 + expected_index = [i for i in index if i not in drop_labels] + frame = pd.DataFrame(index=index).drop(drop_labels) + tm.assert_frame_equal(frame, pd.DataFrame(index=expected_index)) + + @pytest.mark.parametrize('index', [[1, 2, 3], [1, 2, 2]]) + @pytest.mark.parametrize('drop_labels', [[1, 4], [4, 5]]) + def test_drop_non_empty_list(self, index, drop_labels): + # GH 21494 + with tm.assert_raises_regex(KeyError, 'not found in axis'): + pd.DataFrame(index=index).drop(drop_labels) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 6dd38187f7277..70dd358248bc4 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -151,6 +151,17 @@ def test_constructor_complex_dtypes(self): assert a.dtype == df.a.dtype assert b.dtype == df.b.dtype + def test_constructor_dtype_str_na_values(self, string_dtype): + # https://github.com/pandas-dev/pandas/issues/21083 + df = DataFrame({'A': ['x', None]}, dtype=string_dtype) + result = df.isna() + expected = DataFrame({"A": [False, True]}) + tm.assert_frame_equal(result, expected) + assert df.iloc[1, 0] is None + + df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype) + assert np.isnan(df.iloc[1, 0]) + def test_constructor_rec(self): rec = self.frame.to_records(index=False) diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 4c9f8c2ea0980..1eeeec0be3b8b 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -794,22 +794,26 @@ def 
test_arg_for_errors_in_astype(self): @pytest.mark.parametrize('input_vals', [ ([1, 2]), - ([1.0, 2.0, np.nan]), (['1', '2']), (list(pd.date_range('1/1/2011', periods=2, freq='H'))), (list(pd.date_range('1/1/2011', periods=2, freq='H', tz='US/Eastern'))), ([pd.Interval(left=0, right=5)]), ]) - def test_constructor_list_str(self, input_vals): + def test_constructor_list_str(self, input_vals, string_dtype): # GH 16605 # Ensure that data elements are converted to strings when # dtype is str, 'str', or 'U' - for dtype in ['str', str, 'U']: - result = DataFrame({'A': input_vals}, dtype=dtype) - expected = DataFrame({'A': input_vals}).astype({'A': dtype}) - assert_frame_equal(result, expected) + result = DataFrame({'A': input_vals}, dtype=string_dtype) + expected = DataFrame({'A': input_vals}).astype({'A': string_dtype}) + assert_frame_equal(result, expected) + + def test_constructor_list_str_na(self, string_dtype): + + result = DataFrame({"A": [1.0, 2.0, None]}, dtype=string_dtype) + expected = DataFrame({"A": ['1.0', '2.0', None]}, dtype=object) + assert_frame_equal(result, expected) class TestDataFrameDatetimeWithTZ(TestData): diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index 5df50f3d7835b..fdf50805ad818 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -3,6 +3,7 @@ from __future__ import print_function from collections import deque from datetime import datetime +from decimal import Decimal import operator import pytest @@ -282,6 +283,17 @@ def test_neg_numeric(self, df, expected): assert_frame_equal(-df, expected) assert_series_equal(-df['a'], expected['a']) + @pytest.mark.parametrize('df, expected', [ + (np.array([1, 2], dtype=object), np.array([-1, -2], dtype=object)), + ([Decimal('1.0'), Decimal('2.0')], [Decimal('-1.0'), Decimal('-2.0')]), + ]) + def test_neg_object(self, df, expected): + # GH 21380 + df = pd.DataFrame({'a': df}) + expected = pd.DataFrame({'a': expected}) + 
assert_frame_equal(-df, expected) + assert_series_equal(-df['a'], expected['a']) + @pytest.mark.parametrize('df', [ pd.DataFrame({'a': ['a', 'b']}), pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}), @@ -307,6 +319,15 @@ def test_pos_numeric(self, df): @pytest.mark.parametrize('df', [ pd.DataFrame({'a': ['a', 'b']}), + pd.DataFrame({'a': np.array([-1, 2], dtype=object)}), + pd.DataFrame({'a': [Decimal('-1.0'), Decimal('2.0')]}), + ]) + def test_pos_object(self, df): + # GH 21380 + assert_frame_equal(+df, df) + assert_series_equal(+df['a'], df['a']) + + @pytest.mark.parametrize('df', [ pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}), ]) def test_pos_raises(self, df): diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index d89731dc09044..ebf6c5e37b916 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -560,6 +560,16 @@ def test_unstack_dtypes(self): assert left.shape == (3, 2) tm.assert_frame_equal(left, right) + def test_unstack_non_unique_index_names(self): + idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], + names=['c1', 'c1']) + df = DataFrame([1, 2], index=idx) + with pytest.raises(ValueError): + df.unstack('c1') + + with pytest.raises(ValueError): + df.T.stack('c1') + def test_unstack_unused_levels(self): # GH 17845: unused labels in index make unstack() cast int to float idx = pd.MultiIndex.from_product([['a'], ['A', 'B', 'C', 'D']])[:-1] @@ -861,6 +871,23 @@ def test_stack_preserve_categorical_dtype(self): tm.assert_series_equal(result, expected) + @pytest.mark.parametrize("level", [0, 'baz']) + def test_unstack_swaplevel_sortlevel(self, level): + # GH 20994 + mi = pd.MultiIndex.from_product([[0], ['d', 'c']], + names=['bar', 'baz']) + df = pd.DataFrame([[0, 2], [1, 3]], index=mi, columns=['B', 'A']) + df.columns.name = 'foo' + + expected = pd.DataFrame([ + [3, 1, 2, 0]], columns=pd.MultiIndex.from_tuples([ + ('c', 'A'), ('c', 'B'), ('d', 'A'), 
('d', 'B')], names=[ + 'baz', 'foo'])) + expected.index.name = 'bar' + + result = df.unstack().swaplevel(axis=1).sort_index(axis=1, level=level) + tm.assert_frame_equal(result, expected) + def test_unstack_fill_frame_object(): # GH12815 Test unstacking with object. diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py index b60eb89e87da5..599ae683f914b 100644 --- a/pandas/tests/frame/test_sorting.py +++ b/pandas/tests/frame/test_sorting.py @@ -550,18 +550,36 @@ def test_sort_index(self): expected = frame.iloc[:, ::-1] assert_frame_equal(result, expected) - def test_sort_index_multiindex(self): + @pytest.mark.parametrize("level", ['A', 0]) # GH 21052 + def test_sort_index_multiindex(self, level): # GH13496 # sort rows by specified level of multi-index - mi = MultiIndex.from_tuples([[2, 1, 3], [1, 1, 1]], names=list('ABC')) - df = DataFrame([[1, 2], [3, 4]], mi) + mi = MultiIndex.from_tuples([ + [2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list('ABC')) + df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi) + + expected_mi = MultiIndex.from_tuples([ + [1, 1, 1], + [2, 1, 2], + [2, 1, 3]], names=list('ABC')) + expected = pd.DataFrame([ + [5, 6], + [3, 4], + [1, 2]], index=expected_mi) + result = df.sort_index(level=level) + assert_frame_equal(result, expected) - # MI sort, but no level: sort_level has no effect - mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC')) - df = DataFrame([[1, 2], [3, 4]], mi) - result = df.sort_index(sort_remaining=False) - expected = df.sort_index() + # sort_remaining=False + expected_mi = MultiIndex.from_tuples([ + [1, 1, 1], + [2, 1, 3], + [2, 1, 2]], names=list('ABC')) + expected = pd.DataFrame([ + [5, 6], + [1, 2], + [3, 4]], index=expected_mi) + result = df.sort_index(level=level, sort_remaining=False) assert_frame_equal(result, expected) def test_sort_index_intervalindex(self): diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index 
90fbc6e628369..fb9bd74d9876d 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -506,7 +506,15 @@ def test_asfreq_fillvalue(self): actual_series = ts.asfreq(freq='1S', fill_value=9.0) assert_series_equal(expected_series, actual_series) - def test_first_last_valid(self): + @pytest.mark.parametrize("data,idx,expected_first,expected_last", [ + ({'A': [1, 2, 3]}, [1, 1, 2], 1, 2), + ({'A': [1, 2, 3]}, [1, 2, 2], 1, 2), + ({'A': [1, 2, 3, 4]}, ['d', 'd', 'd', 'd'], 'd', 'd'), + ({'A': [1, np.nan, 3]}, [1, 1, 2], 1, 2), + ({'A': [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2), + ({'A': [1, np.nan, 3]}, [1, 2, 2], 1, 2)]) + def test_first_last_valid(self, data, idx, + expected_first, expected_last): N = len(self.frame.index) mat = randn(N) mat[:5] = nan @@ -539,6 +547,11 @@ def test_first_last_valid(self): assert frame.first_valid_index().freq == frame.index.freq assert frame.last_valid_index().freq == frame.index.freq + # GH 21441 + df = DataFrame(data, index=idx) + assert expected_first == df.first_valid_index() + assert expected_last == df.last_valid_index() + def test_first_subset(self): ts = tm.makeTimeDataFrame(freq='12h') result = ts.first('10d') diff --git a/pandas/tests/frame/test_timezones.py b/pandas/tests/frame/test_timezones.py index fa589a0aa4817..3956968173070 100644 --- a/pandas/tests/frame/test_timezones.py +++ b/pandas/tests/frame/test_timezones.py @@ -133,3 +133,13 @@ def test_frame_reset_index(self, tz): xp = df.index.tz rs = roundtripped.index.tz assert xp == rs + + @pytest.mark.parametrize('tz', [None, 'America/New_York']) + def test_boolean_compare_transpose_tzindex_with_dst(self, tz): + # GH 19970 + idx = date_range('20161101', '20161130', freq='4H', tz=tz) + df = DataFrame({'a': range(len(idx)), 'b': range(len(idx))}, + index=idx) + result = df.T == df.T + expected = DataFrame(True, index=list('ab'), columns=idx) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/test_to_csv.py 
b/pandas/tests/frame/test_to_csv.py index e4829ebf48561..3ad25ae73109e 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -9,6 +9,7 @@ import numpy as np from pandas.compat import (lmap, range, lrange, StringIO, u) +from pandas.io.common import _get_handle import pandas.core.common as com from pandas.errors import ParserError from pandas import (DataFrame, Index, Series, MultiIndex, Timestamp, @@ -919,29 +920,46 @@ def test_to_csv_path_is_none(self): recons = pd.read_csv(StringIO(csv_str), index_col=0) assert_frame_equal(self.frame, recons) - def test_to_csv_compression(self, compression): - - df = DataFrame([[0.123456, 0.234567, 0.567567], - [12.32112, 123123.2, 321321.2]], - index=['A', 'B'], columns=['X', 'Y', 'Z']) + @pytest.mark.parametrize('df,encoding', [ + (DataFrame([[0.123456, 0.234567, 0.567567], + [12.32112, 123123.2, 321321.2]], + index=['A', 'B'], columns=['X', 'Y', 'Z']), None), + # GH 21241, 21118 + (DataFrame([['abc', 'def', 'ghi']], columns=['X', 'Y', 'Z']), 'ascii'), + (DataFrame(5 * [[123, u"你好", u"世界"]], + columns=['X', 'Y', 'Z']), 'gb2312'), + (DataFrame(5 * [[123, u"Γειά σου", u"Κόσμε"]], + columns=['X', 'Y', 'Z']), 'cp737') + ]) + def test_to_csv_compression(self, df, encoding, compression): with ensure_clean() as filename: - df.to_csv(filename, compression=compression) - + df.to_csv(filename, compression=compression, encoding=encoding) # test the round trip - to_csv -> read_csv - rs = read_csv(filename, compression=compression, - index_col=0) - assert_frame_equal(df, rs) + result = read_csv(filename, compression=compression, + index_col=0, encoding=encoding) + assert_frame_equal(df, result) + + # test the round trip using file handle - to_csv -> read_csv + f, _handles = _get_handle(filename, 'w', compression=compression, + encoding=encoding) + with f: + df.to_csv(f, encoding=encoding) + result = pd.read_csv(filename, compression=compression, + encoding=encoding, index_col=0, squeeze=True) + 
assert_frame_equal(df, result) # explicitly make sure file is compressed with tm.decompress_file(filename, compression) as fh: - text = fh.read().decode('utf8') + text = fh.read().decode(encoding or 'utf8') for col in df.columns: assert col in text with tm.decompress_file(filename, compression) as fh: - assert_frame_equal(df, read_csv(fh, index_col=0)) + assert_frame_equal(df, read_csv(fh, + index_col=0, + encoding=encoding)) def test_to_csv_date_format(self): with ensure_clean('__tmp_to_csv_date_format__') as path: diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index e0793b8e1bd64..d021396a7acb3 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -6,6 +6,7 @@ import numpy as np import pandas as pd +from pandas.compat import PY37 from pandas import (Index, MultiIndex, CategoricalIndex, DataFrame, Categorical, Series, qcut) from pandas.util.testing import assert_frame_equal, assert_series_equal @@ -205,6 +206,7 @@ def test_level_get_group(observed): assert_frame_equal(result, expected) +@pytest.mark.xfail(PY37, reason="flaky on 3.7, xref gh-21636") @pytest.mark.parametrize('ordered', [True, False]) def test_apply(ordered): # GH 10138 @@ -553,15 +555,11 @@ def test_as_index(): columns=['cat', 'A', 'B']) tm.assert_frame_equal(result, expected) - # another not in-axis grouper - s = Series(['a', 'b', 'b'], name='cat2') + # another not in-axis grouper (conflicting names in index) + s = Series(['a', 'b', 'b'], name='cat') result = df.groupby(['cat', s], as_index=False, observed=True).sum() tm.assert_frame_equal(result, expected) - # GH18872: conflicting names in desired index - with pytest.raises(ValueError): - df.groupby(['cat', s.rename('cat')], observed=True).sum() - # is original index dropped? 
group_columns = ['cat', 'A'] expected = DataFrame( @@ -852,3 +850,23 @@ def test_empty_prod(): result = df.groupby("A", observed=False).B.prod(min_count=1) expected = pd.Series([2, 1, np.nan], expected_idx, name='B') tm.assert_series_equal(result, expected) + + +def test_groupby_multiindex_categorical_datetime(): + # https://github.com/pandas-dev/pandas/issues/21390 + + df = pd.DataFrame({ + 'key1': pd.Categorical(list('abcbabcba')), + 'key2': pd.Categorical( + list(pd.date_range('2018-06-01 00', freq='1T', periods=3)) * 3), + 'values': np.arange(9), + }) + result = df.groupby(['key1', 'key2']).mean() + + idx = pd.MultiIndex.from_product( + [pd.Categorical(['a', 'b', 'c']), + pd.Categorical(pd.date_range('2018-06-01 00', freq='1T', periods=3))], + names=['key1', 'key2']) + expected = pd.DataFrame( + {'values': [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx) + assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index e05f9de5ea7f4..66577d738dd28 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1674,3 +1674,22 @@ def test_tuple_correct_keyerror(): [3, 4]])) with tm.assert_raises_regex(KeyError, "(7, 8)"): df.groupby((7, 8)).mean() + + +def test_groupby_agg_ohlc_non_first(): + # GH 21716 + df = pd.DataFrame([[1], [1]], columns=['foo'], + index=pd.date_range('2018-01-01', periods=2, freq='D')) + + expected = pd.DataFrame([ + [1, 1, 1, 1, 1], + [1, 1, 1, 1, 1] + ], columns=pd.MultiIndex.from_tuples(( + ('foo', 'ohlc', 'open'), ('foo', 'ohlc', 'high'), + ('foo', 'ohlc', 'low'), ('foo', 'ohlc', 'close'), + ('foo', 'sum', 'foo'))), index=pd.date_range( + '2018-01-01', periods=2, freq='D')) + + result = df.groupby(pd.Grouper(freq='D')).agg(['sum', 'ohlc']) + + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_rank.py b/pandas/tests/groupby/test_rank.py index 6ad8b4905abff..d978e144e5013 100644 --- 
a/pandas/tests/groupby/test_rank.py +++ b/pandas/tests/groupby/test_rank.py @@ -1,7 +1,7 @@ import pytest import numpy as np import pandas as pd -from pandas import DataFrame, concat +from pandas import DataFrame, Series, concat from pandas.util import testing as tm @@ -59,9 +59,9 @@ def test_rank_apply(): ('first', False, False, [3., 4., 1., 5., 2.]), ('first', False, True, [.6, .8, .2, 1., .4]), ('dense', True, False, [1., 1., 3., 1., 2.]), - ('dense', True, True, [0.2, 0.2, 0.6, 0.2, 0.4]), + ('dense', True, True, [1. / 3., 1. / 3., 3. / 3., 1. / 3., 2. / 3.]), ('dense', False, False, [3., 3., 1., 3., 2.]), - ('dense', False, True, [.6, .6, .2, .6, .4]), + ('dense', False, True, [3. / 3., 3. / 3., 1. / 3., 3. / 3., 2. / 3.]), ]) def test_rank_args(grps, vals, ties_method, ascending, pct, exp): key = np.repeat(grps, len(vals)) @@ -126,7 +126,7 @@ def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp): @pytest.mark.parametrize("grps", [ ['qux'], ['qux', 'quux']]) @pytest.mark.parametrize("vals", [ - [2, 2, np.nan, 8, 2, 6, np.nan, np.nan], # floats + [2, 2, np.nan, 8, 2, 6, np.nan, np.nan], [pd.Timestamp('2018-01-02'), pd.Timestamp('2018-01-02'), np.nan, pd.Timestamp('2018-01-08'), pd.Timestamp('2018-01-02'), pd.Timestamp('2018-01-06'), np.nan, np.nan] @@ -167,11 +167,11 @@ def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp): ('dense', True, 'keep', False, [1., 1., np.nan, 3., 1., 2., np.nan, np.nan]), ('dense', True, 'keep', True, - [0.2, 0.2, np.nan, 0.6, 0.2, 0.4, np.nan, np.nan]), + [1. / 3., 1. / 3., np.nan, 3. / 3., 1. / 3., 2. / 3., np.nan, np.nan]), ('dense', False, 'keep', False, [3., 3., np.nan, 1., 3., 2., np.nan, np.nan]), ('dense', False, 'keep', True, - [.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]), + [3. / 3., 3. / 3., np.nan, 1. / 3., 3. / 3., 2. 
/ 3., np.nan, np.nan]), ('average', True, 'no_na', False, [2., 2., 7., 5., 2., 4., 7., 7.]), ('average', True, 'no_na', True, [0.25, 0.25, 0.875, 0.625, 0.25, 0.5, 0.875, 0.875]), @@ -198,10 +198,10 @@ def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp): [0.375, 0.5, 0.75, 0.125, 0.625, 0.25, 0.875, 1.]), ('dense', True, 'no_na', False, [1., 1., 4., 3., 1., 2., 4., 4.]), ('dense', True, 'no_na', True, - [0.125, 0.125, 0.5, 0.375, 0.125, 0.25, 0.5, 0.5]), + [0.25, 0.25, 1., 0.75, 0.25, 0.5, 1., 1.]), ('dense', False, 'no_na', False, [3., 3., 4., 1., 3., 2., 4., 4.]), ('dense', False, 'no_na', True, - [0.375, 0.375, 0.5, 0.125, 0.375, 0.25, 0.5, 0.5]) + [0.75, 0.75, 1., 0.25, 0.75, 0.5, 1., 1.]) ]) def test_rank_args_missing(grps, vals, ties_method, ascending, na_option, pct, exp): @@ -252,3 +252,20 @@ def test_rank_object_raises(ties_method, ascending, na_option, df.groupby('key').rank(method=ties_method, ascending=ascending, na_option=na_option, pct=pct) + + +def test_rank_empty_group(): + # see gh-22519 + column = "A" + df = DataFrame({ + "A": [0, 1, 0], + "B": [1., np.nan, 2.] 
+ }) + + result = df.groupby(column).B.rank(pct=True) + expected = Series([0.5, np.nan, 1.0], name="B") + tm.assert_series_equal(result, expected) + + result = df.groupby(column).rank(pct=True) + expected = DataFrame({"B": [0.5, np.nan, 1.0]}) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index 626057c1ea760..7fccf1f57a886 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -721,6 +721,23 @@ def interweave(list_obj): assert_frame_equal(result, exp) +@pytest.mark.parametrize("fill_method", ['ffill', 'bfill']) +def test_pad_stable_sorting(fill_method): + # GH 21207 + x = [0] * 20 + y = [np.nan] * 10 + [1] * 10 + + if fill_method == 'bfill': + y = y[::-1] + + df = pd.DataFrame({'x': x, 'y': y}) + expected = df.copy() + + result = getattr(df.groupby('x'), fill_method)() + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("test_series", [True, False]) @pytest.mark.parametrize("periods,fill_method,limit", [ (1, 'ffill', None), (1, 'ffill', 1), diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py index dae69a86910af..b138b79caac76 100644 --- a/pandas/tests/indexes/datetimes/test_construction.py +++ b/pandas/tests/indexes/datetimes/test_construction.py @@ -469,6 +469,15 @@ def test_constructor_with_non_normalized_pytz(self, tz): result = DatetimeIndex(['2010'], tz=non_norm_tz) assert pytz.timezone(tz) is result.tz + def test_constructor_timestamp_near_dst(self): + # GH 20854 + ts = [Timestamp('2016-10-30 03:00:00+0300', tz='Europe/Helsinki'), + Timestamp('2016-10-30 03:00:00+0200', tz='Europe/Helsinki')] + result = DatetimeIndex(ts) + expected = DatetimeIndex([ts[0].to_pydatetime(), + ts[1].to_pydatetime()]) + tm.assert_index_equal(result, expected) + class TestTimeSeries(object): diff --git a/pandas/tests/indexes/datetimes/test_date_range.py 
b/pandas/tests/indexes/datetimes/test_date_range.py index 193804b66395b..ec37bbbcb6c02 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -278,6 +278,20 @@ def test_wom_len(self, periods): res = date_range(start='20110101', periods=periods, freq='WOM-1MON') assert len(res) == periods + def test_construct_over_dst(self): + # GH 20854 + pre_dst = Timestamp('2010-11-07 01:00:00').tz_localize('US/Pacific', + ambiguous=True) + pst_dst = Timestamp('2010-11-07 01:00:00').tz_localize('US/Pacific', + ambiguous=False) + expect_data = [Timestamp('2010-11-07 00:00:00', tz='US/Pacific'), + pre_dst, + pst_dst] + expected = DatetimeIndex(expect_data) + result = date_range(start='2010-11-7', periods=3, + freq='H', tz='US/Pacific') + tm.assert_index_equal(result, expected) + class TestGenRangeGeneration(object): diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py index 056924f2c6663..743cbc107cce5 100644 --- a/pandas/tests/indexes/datetimes/test_misc.py +++ b/pandas/tests/indexes/datetimes/test_misc.py @@ -1,5 +1,6 @@ import locale import calendar +import unicodedata import pytest @@ -7,7 +8,7 @@ import pandas as pd import pandas.util.testing as tm from pandas import (Index, DatetimeIndex, datetime, offsets, - date_range, Timestamp) + date_range, Timestamp, compat) class TestTimeSeries(object): @@ -284,10 +285,24 @@ def test_datetime_name_accessors(self, time_locale): dti = DatetimeIndex(freq='M', start='2012', end='2013') result = dti.month_name(locale=time_locale) expected = Index([month.capitalize() for month in expected_months]) + + # work around different normalization schemes + # https://github.com/pandas-dev/pandas/issues/22342 + if not compat.PY2: + result = result.str.normalize("NFD") + expected = expected.str.normalize("NFD") + tm.assert_index_equal(result, expected) + for date, expected in zip(dti, expected_months): result = 
date.month_name(locale=time_locale) - assert result == expected.capitalize() + expected = expected.capitalize() + + if not compat.PY2: + result = unicodedata.normalize("NFD", result) + expected = unicodedata.normalize("NFD", result) + + assert result == expected dti = dti.append(DatetimeIndex([pd.NaT])) assert np.isnan(dti.month_name(locale=time_locale)[-1]) diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py index 9180bb0af3af3..801dcb91b124e 100644 --- a/pandas/tests/indexes/datetimes/test_scalar_compat.py +++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py @@ -134,6 +134,21 @@ def test_round(self, tz): ts = '2016-10-17 12:00:00.001501031' DatetimeIndex([ts]).round('1010ns') + def test_no_rounding_occurs(self, tz): + # GH 21262 + rng = date_range(start='2016-01-01', periods=5, + freq='2Min', tz=tz) + + expected_rng = DatetimeIndex([ + Timestamp('2016-01-01 00:00:00', tz=tz, freq='2T'), + Timestamp('2016-01-01 00:02:00', tz=tz, freq='2T'), + Timestamp('2016-01-01 00:04:00', tz=tz, freq='2T'), + Timestamp('2016-01-01 00:06:00', tz=tz, freq='2T'), + Timestamp('2016-01-01 00:08:00', tz=tz, freq='2T'), + ]) + + tm.assert_index_equal(rng.round(freq='2T'), expected_rng) + @pytest.mark.parametrize('test_input, rounder, freq, expected', [ (['2117-01-01 00:00:45'], 'floor', '15s', ['2117-01-01 00:00:45']), (['2117-01-01 00:00:45'], 'ceil', '15s', ['2117-01-01 00:00:45']), @@ -143,6 +158,10 @@ def test_round(self, tz): ['1823-01-01 00:00:01.000000020']), (['1823-01-01 00:00:01'], 'floor', '1s', ['1823-01-01 00:00:01']), (['1823-01-01 00:00:01'], 'ceil', '1s', ['1823-01-01 00:00:01']), + (['2018-01-01 00:15:00'], 'ceil', '15T', ['2018-01-01 00:15:00']), + (['2018-01-01 00:15:00'], 'floor', '15T', ['2018-01-01 00:15:00']), + (['1823-01-01 03:00:00'], 'ceil', '3H', ['1823-01-01 03:00:00']), + (['1823-01-01 03:00:00'], 'floor', '3H', ['1823-01-01 03:00:00']), (('NaT', '1823-01-01 00:00:01'), 'floor', 
'1s', ('NaT', '1823-01-01 00:00:01')), (('NaT', '1823-01-01 00:00:01'), 'ceil', '1s', diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index 09210d8b64d1b..573940edaa08f 100644 --- a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -2,7 +2,7 @@ """ Tests for DatetimeIndex timezone-related methods """ -from datetime import datetime, timedelta, tzinfo +from datetime import datetime, timedelta, tzinfo, date, time from distutils.version import LooseVersion import pytest @@ -706,6 +706,32 @@ def test_join_utc_convert(self, join_type): assert isinstance(result, DatetimeIndex) assert result.tz.zone == 'UTC' + @pytest.mark.parametrize("dtype", [ + None, 'datetime64[ns, CET]', + 'datetime64[ns, EST]', 'datetime64[ns, UTC]' + ]) + def test_date_accessor(self, dtype): + # Regression test for GH#21230 + expected = np.array([date(2018, 6, 4), pd.NaT]) + + index = DatetimeIndex(['2018-06-04 10:00:00', pd.NaT], dtype=dtype) + result = index.date + + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("dtype", [ + None, 'datetime64[ns, CET]', + 'datetime64[ns, EST]', 'datetime64[ns, UTC]' + ]) + def test_time_accessor(self, dtype): + # Regression test for GH#21267 + expected = np.array([time(10, 20, 30), pd.NaT]) + + index = DatetimeIndex(['2018-06-04 10:20:30', pd.NaT], dtype=dtype) + result = index.time + + tm.assert_numpy_array_equal(result, expected) + def test_dti_drop_dont_lose_tz(self): # GH#2621 ind = date_range("2012-12-01", periods=10, tz="utc") diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 45be3974dad63..8b0514764b0c0 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -650,6 +650,14 @@ def test_unit_mixed(self, cache): with pytest.raises(ValueError): pd.to_datetime(arr, errors='raise', cache=cache) + 
@pytest.mark.parametrize('cache', [True, False]) + def test_unit_rounding(self, cache): + # GH 14156: argument will incur floating point errors but no + # premature rounding + result = pd.to_datetime(1434743731.8770001, unit='s', cache=cache) + expected = pd.Timestamp('2015-06-19 19:55:31.877000093') + assert result == expected + @pytest.mark.parametrize('cache', [True, False]) def test_dataframe(self, cache): diff --git a/pandas/tests/indexes/interval/test_construction.py b/pandas/tests/indexes/interval/test_construction.py index 5fdf92dcb2044..b1711c3444586 100644 --- a/pandas/tests/indexes/interval/test_construction.py +++ b/pandas/tests/indexes/interval/test_construction.py @@ -6,8 +6,9 @@ from pandas import ( Interval, IntervalIndex, Index, Int64Index, Float64Index, Categorical, - date_range, timedelta_range, period_range, notna) + CategoricalIndex, date_range, timedelta_range, period_range, notna) from pandas.compat import lzip +from pandas.core.dtypes.common import is_categorical_dtype from pandas.core.dtypes.dtypes import IntervalDtype import pandas.core.common as com import pandas.util.testing as tm @@ -111,6 +112,22 @@ def test_constructor_string(self, constructor, breaks): with tm.assert_raises_regex(TypeError, msg): constructor(**self.get_kwargs_from_breaks(breaks)) + @pytest.mark.parametrize('cat_constructor', [ + Categorical, CategoricalIndex]) + def test_constructor_categorical_valid(self, constructor, cat_constructor): + # GH 21243/21253 + if isinstance(constructor, partial) and constructor.func is Index: + # Index is defined to create CategoricalIndex from categorical data + pytest.skip() + + breaks = np.arange(10, dtype='int64') + expected = IntervalIndex.from_breaks(breaks) + + cat_breaks = cat_constructor(breaks) + result_kwargs = self.get_kwargs_from_breaks(cat_breaks) + result = constructor(**result_kwargs) + tm.assert_index_equal(result, expected) + def test_generic_errors(self, constructor): # filler input data to be used when supplying 
invalid kwargs filler = self.get_kwargs_from_breaks(range(10)) @@ -238,6 +255,8 @@ def get_kwargs_from_breaks(self, breaks, closed='right'): tuples = lzip(breaks[:-1], breaks[1:]) if isinstance(breaks, (list, tuple)): return {'data': tuples} + elif is_categorical_dtype(breaks): + return {'data': breaks._constructor(tuples)} return {'data': com._asarray_tuplesafe(tuples)} def test_constructor_errors(self): @@ -286,6 +305,8 @@ def get_kwargs_from_breaks(self, breaks, closed='right'): if isinstance(breaks, list): return {'data': ivs} + elif is_categorical_dtype(breaks): + return {'data': breaks._constructor(ivs)} return {'data': np.array(ivs, dtype=object)} def test_generic_errors(self, constructor): diff --git a/pandas/tests/indexes/interval/test_interval_range.py b/pandas/tests/indexes/interval/test_interval_range.py index 0fadfcf0c7f28..29fe2b0185662 100644 --- a/pandas/tests/indexes/interval/test_interval_range.py +++ b/pandas/tests/indexes/interval/test_interval_range.py @@ -110,6 +110,8 @@ def test_constructor_timedelta(self, closed, name, freq, periods): @pytest.mark.parametrize('start, end, freq, expected_endpoint', [ (0, 10, 3, 9), + (0, 10, 1.5, 9), + (0.5, 10, 3, 9.5), (Timedelta('0D'), Timedelta('10D'), '2D4H', Timedelta('8D16H')), (Timestamp('2018-01-01'), Timestamp('2018-02-09'), @@ -125,6 +127,22 @@ def test_early_truncation(self, start, end, freq, expected_endpoint): result_endpoint = result.right[-1] assert result_endpoint == expected_endpoint + @pytest.mark.parametrize('start, end, freq', [ + (0.5, None, None), + (None, 4.5, None), + (0.5, None, 1.5), + (None, 6.5, 1.5)]) + def test_no_invalid_float_truncation(self, start, end, freq): + # GH 21161 + if freq is None: + breaks = [0.5, 1.5, 2.5, 3.5, 4.5] + else: + breaks = [0.5, 2.0, 3.5, 5.0, 6.5] + expected = IntervalIndex.from_breaks(breaks) + + result = interval_range(start=start, end=end, periods=4, freq=freq) + tm.assert_index_equal(result, expected) + @pytest.mark.parametrize('start, mid, end', 
[ (Timestamp('2018-03-10', tz='US/Eastern'), Timestamp('2018-03-10 23:30:00', tz='US/Eastern'), diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index f4fa547574b9e..a0d6907055a2e 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -419,21 +419,24 @@ def test_constructor_dtypes_timedelta(self, attr, klass): result = klass(list(values), dtype=dtype) tm.assert_index_equal(result, index) - def test_constructor_empty_gen(self): - skip_index_keys = ["repeats", "periodIndex", "rangeIndex", - "tuples"] - for key, index in self.generate_index_types(skip_index_keys): - empty = index.__class__([]) - assert isinstance(empty, index.__class__) - assert not len(empty) + @pytest.mark.parametrize("value", [[], iter([]), (x for x in [])]) + @pytest.mark.parametrize("klass", + [Index, Float64Index, Int64Index, UInt64Index, + CategoricalIndex, DatetimeIndex, TimedeltaIndex]) + def test_constructor_empty(self, value, klass): + empty = klass(value) + assert isinstance(empty, klass) + assert not len(empty) @pytest.mark.parametrize("empty,klass", [ (PeriodIndex([], freq='B'), PeriodIndex), + (PeriodIndex(iter([]), freq='B'), PeriodIndex), + (PeriodIndex((x for x in []), freq='B'), PeriodIndex), (RangeIndex(step=1), pd.RangeIndex), (MultiIndex(levels=[[1, 2], ['blue', 'red']], labels=[[], []]), MultiIndex) ]) - def test_constructor_empty(self, empty, klass): + def test_constructor_empty_special(self, empty, klass): assert isinstance(empty, klass) assert not len(empty) @@ -455,6 +458,13 @@ def test_constructor_nonhashable_name(self, indices): tm.assert_raises_regex(TypeError, message, indices.set_names, names=renamed) + def test_constructor_overflow_int64(self): + # see gh-15832 + msg = ("the elements provided in the data cannot " + "all be casted to the dtype int64") + with tm.assert_raises_regex(OverflowError, msg): + Index([np.iinfo(np.uint64).max - 1], dtype="int64") + def test_view_with_args(self): restricted = 
['unicodeIndex', 'strIndex', 'catIndex', 'boolIndex', @@ -2088,6 +2098,17 @@ def test_get_duplicates_deprecated(self): with tm.assert_produces_warning(FutureWarning): index.get_duplicates() + def test_tab_complete_warning(self, ip): + # https://github.com/pandas-dev/pandas/issues/16409 + pytest.importorskip('IPython', minversion="6.0.0") + from IPython.core.completer import provisionalcompleter + + code = "import pandas as pd; idx = pd.Index([1, 2])" + ip.run_code(code) + with tm.assert_produces_warning(None): + with provisionalcompleter('ignore'): + list(ip.Completer.completions('idx.', 4)) + class TestMixedIntIndex(Base): # Mostly the tests from common.py for which the results differ diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index 6a1a1a5bdba4f..a2a4170256088 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -543,38 +543,53 @@ def test_reindex_empty_index(self): tm.assert_numpy_array_equal(indexer, np.array([-1, -1], dtype=np.intp)) - def test_is_monotonic(self): - c = CategoricalIndex([1, 2, 3]) + @pytest.mark.parametrize('data, non_lexsorted_data', [ + [[1, 2, 3], [9, 0, 1, 2, 3]], + [list('abc'), list('fabcd')], + ]) + def test_is_monotonic(self, data, non_lexsorted_data): + c = CategoricalIndex(data) assert c.is_monotonic_increasing assert not c.is_monotonic_decreasing - c = CategoricalIndex([1, 2, 3], ordered=True) + c = CategoricalIndex(data, ordered=True) assert c.is_monotonic_increasing assert not c.is_monotonic_decreasing - c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1]) + c = CategoricalIndex(data, categories=reversed(data)) assert not c.is_monotonic_increasing assert c.is_monotonic_decreasing - c = CategoricalIndex([1, 3, 2], categories=[3, 2, 1]) + c = CategoricalIndex(data, categories=reversed(data), ordered=True) assert not c.is_monotonic_increasing - assert not c.is_monotonic_decreasing + assert c.is_monotonic_decreasing - c = CategoricalIndex([1, 
2, 3], categories=[3, 2, 1], ordered=True) + # test when data is neither monotonic increasing nor decreasing + reordered_data = [data[0], data[2], data[1]] + c = CategoricalIndex(reordered_data, categories=reversed(data)) assert not c.is_monotonic_increasing - assert c.is_monotonic_decreasing + assert not c.is_monotonic_decreasing # non lexsorted categories - categories = [9, 0, 1, 2, 3] + categories = non_lexsorted_data - c = CategoricalIndex([9, 0], categories=categories) + c = CategoricalIndex(categories[:2], categories=categories) assert c.is_monotonic_increasing assert not c.is_monotonic_decreasing - c = CategoricalIndex([0, 1], categories=categories) + c = CategoricalIndex(categories[1:3], categories=categories) assert c.is_monotonic_increasing assert not c.is_monotonic_decreasing + @pytest.mark.parametrize('values, expected', [ + ([1, 2, 3], True), + ([1, 3, 1], False), + (list('abc'), True), + (list('aba'), False)]) + def test_is_unique(self, values, expected): + ci = CategoricalIndex(values) + assert ci.is_unique is expected + def test_duplicates(self): idx = CategoricalIndex([0, 0, 0], name='foo') diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index 37f70090c179f..a7e90207c9ad7 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -12,8 +12,8 @@ import pandas as pd -from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex, - compat, date_range, period_range) +from pandas import (CategoricalIndex, Categorical, DataFrame, Index, + MultiIndex, compat, date_range, period_range) from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY from pandas.errors import PerformanceWarning, UnsortedIndexError from pandas.core.dtypes.dtypes import CategoricalDtype @@ -164,6 +164,22 @@ def test_set_name_methods(self): assert res is None assert ind.names == new_names2 + @pytest.mark.parametrize('inplace', [True, False]) + def test_set_names_with_nlevel_1(self, inplace): + # GH 
21149 + # Ensure that .set_names for MultiIndex with + # nlevels == 1 does not raise any errors + expected = pd.MultiIndex(levels=[[0, 1]], + labels=[[0, 1]], + names=['first']) + m = pd.MultiIndex.from_product([[0, 1]]) + result = m.set_names('first', level=0, inplace=inplace) + + if inplace: + result = m + + tm.assert_index_equal(result, expected) + def test_set_levels_labels_directly(self): # setting levels/labels directly raises AttributeError @@ -639,22 +655,27 @@ def test_constructor_nonhashable_names(self): # With .set_names() tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed) - @pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'], - ['1', 'a', '1']]) + @pytest.mark.parametrize('names', [['a', 'b', 'a'], [1, 1, 2], + [1, 'a', 1]]) def test_duplicate_level_names(self, names): - # GH18872 - pytest.raises(ValueError, pd.MultiIndex.from_product, - [[0, 1]] * 3, names=names) + # GH18872, GH19029 + mi = pd.MultiIndex.from_product([[0, 1]] * 3, names=names) + assert mi.names == names # With .rename() mi = pd.MultiIndex.from_product([[0, 1]] * 3) - tm.assert_raises_regex(ValueError, "Duplicated level name:", - mi.rename, names) + mi = mi.rename(names) + assert mi.names == names # With .rename(., level=) - mi.rename(names[0], level=1, inplace=True) - tm.assert_raises_regex(ValueError, "Duplicated level name:", - mi.rename, names[:2], level=[0, 2]) + mi.rename(names[1], level=1, inplace=True) + mi = mi.rename([names[0], names[2]], level=[0, 2]) + assert mi.names == names + + def test_duplicate_level_names_access_raises(self): + self.index.names = ['foo', 'foo'] + tm.assert_raises_regex(KeyError, 'Level foo not found', + self.index._get_level_number, 'foo') def assert_multiindex_copied(self, copy, original): # Levels should be (at least, shallow copied) @@ -1165,12 +1186,12 @@ def test_iter(self): ('baz', 'two'), ('qux', 'one'), ('qux', 'two')] assert result == expected - def test_legacy_pickle(self): + def test_legacy_pickle(self, 
datapath): if PY3: pytest.skip("testing for legacy pickles not " "support on py3") - path = tm.get_data_path('multiindex_v1.pickle') + path = datapath('indexes', 'data', 'multiindex_v1.pickle') obj = pd.read_pickle(path) obj2 = MultiIndex.from_tuples(obj.values) @@ -1186,10 +1207,10 @@ def test_legacy_pickle(self): assert_almost_equal(res, exp) assert_almost_equal(exp, exp2) - def test_legacy_v2_unpickle(self): + def test_legacy_v2_unpickle(self, datapath): # 0.7.3 -> 0.8.0 format manage - path = tm.get_data_path('mindex_073.pickle') + path = datapath('indexes', 'data', 'mindex_073.pickle') obj = pd.read_pickle(path) obj2 = MultiIndex.from_tuples(obj.values) @@ -1574,6 +1595,14 @@ def test_get_indexer_nearest(self): with pytest.raises(NotImplementedError): midx.get_indexer(['a'], method='pad', tolerance=2) + def test_get_indexer_categorical_time(self): + # https://github.com/pandas-dev/pandas/issues/21390 + midx = MultiIndex.from_product( + [Categorical(['a', 'b', 'c']), + Categorical(date_range("2012-01-01", periods=3, freq='H'))]) + result = midx.get_indexer(midx) + tm.assert_numpy_array_equal(result, np.arange(9, dtype=np.intp)) + def test_hash_collisions(self): # non-smoke test that we don't get hash collisions @@ -3279,3 +3308,20 @@ def test_duplicate_multiindex_labels(self): with pytest.raises(ValueError): ind.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]], inplace=True) + + def test_multiindex_compare(self): + # GH 21149 + # Ensure comparison operations for MultiIndex with nlevels == 1 + # behave consistently with those for MultiIndex with nlevels > 1 + + midx = pd.MultiIndex.from_product([[0, 1]]) + + # Equality self-test: MultiIndex object vs self + expected = pd.Series([True, True]) + result = pd.Series(midx == midx) + tm.assert_series_equal(result, expected) + + # Greater than comparison: MultiIndex object vs self + expected = pd.Series([False, False]) + result = pd.Series(midx > midx) + tm.assert_series_equal(result, expected) diff --git 
a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py index 8deb51e190bab..7623587803b41 100644 --- a/pandas/tests/io/conftest.py +++ b/pandas/tests/io/conftest.py @@ -1,32 +1,23 @@ -import os - import pytest from pandas.io.parsers import read_table -from pandas.util import testing as tm - - -@pytest.fixture -def parser_data(request): - return os.path.join(tm.get_data_path(), '..', 'parser', 'data') @pytest.fixture -def tips_file(parser_data): +def tips_file(datapath): """Path to the tips dataset""" - return os.path.join(parser_data, 'tips.csv') + return datapath('io', 'parser', 'data', 'tips.csv') @pytest.fixture -def jsonl_file(parser_data): +def jsonl_file(datapath): """Path a JSONL dataset""" - return os.path.join(parser_data, 'items.jsonl') + return datapath('io', 'parser', 'data', 'items.jsonl') @pytest.fixture -def salaries_table(parser_data): +def salaries_table(datapath): """DataFrame with the salaries dataset""" - path = os.path.join(parser_data, 'salaries.csv') - return read_table(path) + return read_table(datapath('io', 'parser', 'data', 'salaries.csv')) @pytest.fixture diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index f221df93dd412..191e3f37f1c37 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -305,6 +305,44 @@ def test_repr_non_interactive(self): assert not has_truncated_repr(df) assert not has_expanded_repr(df) + def test_repr_truncates_terminal_size(self): + # https://github.com/pandas-dev/pandas/issues/21180 + # TODO: use mock fixutre. + # This is being backported, so doing it directly here. 
+ try: + from unittest import mock + except ImportError: + mock = pytest.importorskip("mock") + + terminal_size = (118, 96) + p1 = mock.patch('pandas.io.formats.console.get_terminal_size', + return_value=terminal_size) + p2 = mock.patch('pandas.io.formats.format.get_terminal_size', + return_value=terminal_size) + + index = range(5) + columns = pd.MultiIndex.from_tuples([ + ('This is a long title with > 37 chars.', 'cat'), + ('This is a loooooonger title with > 43 chars.', 'dog'), + ]) + df = pd.DataFrame(1, index=index, columns=columns) + + with p1, p2: + result = repr(df) + + h1, h2 = result.split('\n')[:2] + assert 'long' in h1 + assert 'loooooonger' in h1 + assert 'cat' in h2 + assert 'dog' in h2 + + # regular columns + df2 = pd.DataFrame({"A" * 41: [1, 2], 'B' * 41: [1, 2]}) + with p1, p2: + result = repr(df2) + + assert df2.columns[0] in result.split('\n')[0] + def test_repr_max_columns_max_rows(self): term_width, term_height = get_terminal_size() if term_width < 10 or term_height < 10: @@ -916,8 +954,8 @@ def test_unicode_problem_decoding_as_ascii(self): dm = DataFrame({u('c/\u03c3'): Series({'test': np.nan})}) compat.text_type(dm.to_string()) - def test_string_repr_encoding(self): - filepath = tm.get_data_path('unicode_series.csv') + def test_string_repr_encoding(self, datapath): + filepath = datapath('io', 'formats', 'data', 'unicode_series.csv') df = pd.read_csv(filepath, header=None, encoding='latin1') repr(df) repr(df[1]) diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py index dfa3751bff57a..36c4ae547ad4e 100644 --- a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -285,3 +285,18 @@ def test_to_csv_string_array_utf8(self): df.to_csv(path, encoding='utf-8') with open(path, 'r') as f: assert f.read() == expected_utf8 + + @tm.capture_stdout + def test_to_csv_stdout_file(self): + # GH 21561 + df = pd.DataFrame([['foo', 'bar'], ['baz', 'qux']], + columns=['name_1', 'name_2']) + 
expected_ascii = '''\ +,name_1,name_2 +0,foo,bar +1,baz,qux +''' + df.to_csv(sys.stdout, encoding='ascii') + output = sys.stdout.getvalue() + assert output == expected_ascii + assert not sys.stdout.closed diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py index c9074ca49e5be..1b9cbc57865d2 100644 --- a/pandas/tests/io/json/test_compression.py +++ b/pandas/tests/io/json/test_compression.py @@ -2,6 +2,7 @@ import pandas as pd import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas.util.testing import assert_frame_equal, assert_raises_regex @@ -21,16 +22,17 @@ def test_compression_roundtrip(compression): assert_frame_equal(df, pd.read_json(result)) -def test_read_zipped_json(): - uncompressed_path = tm.get_data_path("tsframe_v012.json") +def test_read_zipped_json(datapath): + uncompressed_path = datapath("io", "json", "data", "tsframe_v012.json") uncompressed_df = pd.read_json(uncompressed_path) - compressed_path = tm.get_data_path("tsframe_v012.json.zip") + compressed_path = datapath("io", "json", "data", "tsframe_v012.json.zip") compressed_df = pd.read_json(compressed_path, compression='zip') assert_frame_equal(uncompressed_df, compressed_df) +@td.skip_if_not_us_locale def test_with_s3_url(compression): boto3 = pytest.importorskip('boto3') pytest.importorskip('s3fs') diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index 49b39c17238ae..b6483d0e978ba 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -560,3 +560,16 @@ def test_multiindex(self, index_names): out = df.to_json(orient="table") result = pd.read_json(out, orient="table") tm.assert_frame_equal(df, result) + + @pytest.mark.parametrize("strict_check", [ + pytest.param(True, marks=pytest.mark.xfail), False]) + def test_empty_frame_roundtrip(self, strict_check): + # GH 21287 + df = pd.DataFrame([], columns=['a', 
'b', 'c']) + expected = df.copy() + out = df.to_json(orient='table') + result = pd.read_json(out, orient='table') + # TODO: When DF coercion issue (#21345) is resolved tighten type checks + tm.assert_frame_equal(expected, result, + check_dtype=strict_check, + check_index_type=strict_check) diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index 0fabaf747b6de..200a853c48900 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ -123,6 +123,12 @@ def test_simple_normalize_with_separator(self, deep_nested): 'country', 'states_name']).sort_values() assert result.columns.sort_values().equals(expected) + def test_value_array_record_prefix(self): + # GH 21536 + result = json_normalize({'A': [1, 2]}, 'A', record_prefix='Prefix.') + expected = DataFrame([[1], [2]], columns=['Prefix.0']) + tm.assert_frame_equal(result, expected) + def test_more_deeply_nested(self, deep_nested): result = json_normalize(deep_nested, ['states', 'cities'], @@ -238,15 +244,16 @@ def test_non_ascii_key(self): tm.assert_frame_equal(result, expected) def test_missing_field(self, author_missing_data): - # GH20030: Checks for robustness of json_normalize - should - # unnest records where only the first record has a None value + # GH20030: result = json_normalize(author_missing_data) ex_data = [ - {'author_name.first': np.nan, + {'info': np.nan, + 'author_name.first': np.nan, 'author_name.last_name': np.nan, 'info.created_at': np.nan, 'info.last_updated': np.nan}, - {'author_name.first': 'Jane', + {'info': None, + 'author_name.first': 'Jane', 'author_name.last_name': 'Doe', 'info.created_at': '11/08/1993', 'info.last_updated': '26/05/2012'} @@ -351,9 +358,8 @@ def test_json_normalize_errors(self): errors='raise' ) - def test_nonetype_dropping(self): - # GH20030: Checks that None values are dropped in nested_to_record - # to prevent additional columns of nans when passed to DataFrame + def 
test_donot_drop_nonevalues(self): + # GH21356 data = [ {'info': None, 'author_name': @@ -367,7 +373,8 @@ def test_nonetype_dropping(self): ] result = nested_to_record(data) expected = [ - {'author_name.first': 'Smith', + {'info': None, + 'author_name.first': 'Smith', 'author_name.last_name': 'Appleseed'}, {'author_name.first': 'Jane', 'author_name.last_name': 'Doe', @@ -375,3 +382,61 @@ def test_nonetype_dropping(self): 'info.last_updated': '26/05/2012'}] assert result == expected + + def test_nonetype_top_level_bottom_level(self): + # GH21158: If inner level json has a key with a null value + # make sure it doesnt do a new_d.pop twice and except + data = { + "id": None, + "location": { + "country": { + "state": { + "id": None, + "town.info": { + "id": None, + "region": None, + "x": 49.151580810546875, + "y": -33.148521423339844, + "z": 27.572303771972656}}} + } + } + result = nested_to_record(data) + expected = { + 'id': None, + 'location.country.state.id': None, + 'location.country.state.town.info.id': None, + 'location.country.state.town.info.region': None, + 'location.country.state.town.info.x': 49.151580810546875, + 'location.country.state.town.info.y': -33.148521423339844, + 'location.country.state.town.info.z': 27.572303771972656} + assert result == expected + + def test_nonetype_multiple_levels(self): + # GH21158: If inner level json has a key with a null value + # make sure it doesnt do a new_d.pop twice and except + data = { + "id": None, + "location": { + "id": None, + "country": { + "id": None, + "state": { + "id": None, + "town.info": { + "region": None, + "x": 49.151580810546875, + "y": -33.148521423339844, + "z": 27.572303771972656}}} + } + } + result = nested_to_record(data) + expected = { + 'id': None, + 'location.id': None, + 'location.country.id': None, + 'location.country.state.id': None, + 'location.country.state.town.info.region': None, + 'location.country.state.town.info.x': 49.151580810546875, + 'location.country.state.town.info.y': 
-33.148521423339844, + 'location.country.state.town.info.z': 27.572303771972656} + assert result == expected diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 7e497c395266f..b5a2be87de1c4 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -15,6 +15,7 @@ assert_series_equal, network, ensure_clean, assert_index_equal) import pandas.util.testing as tm +import pandas.util._test_decorators as td _seriesd = tm.getSeriesData() _tsd = tm.getTimeSeriesData() @@ -37,8 +38,9 @@ class TestPandasContainer(object): - def setup_method(self, method): - self.dirpath = tm.get_data_path() + @pytest.fixture(scope="function", autouse=True) + def setup(self, datapath): + self.dirpath = datapath("io", "json", "data") self.ts = tm.makeTimeSeries() self.ts.name = 'ts' @@ -59,7 +61,8 @@ def setup_method(self, method): self.mixed_frame = _mixed_frame.copy() self.categorical = _cat_frame.copy() - def teardown_method(self, method): + yield + del self.dirpath del self.ts @@ -1038,6 +1041,7 @@ def test_read_inline_jsonl(self): expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) assert_frame_equal(result, expected) + @td.skip_if_not_us_locale def test_read_s3_jsonl(self, s3_resource): # GH17200 diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py index 2423ddcd9a1a0..fb510f1a74556 100644 --- a/pandas/tests/io/parser/common.py +++ b/pandas/tests/io/parser/common.py @@ -54,20 +54,21 @@ def test_bad_stream_exception(self): # and C engine will raise UnicodeDecodeError instead of # c engine raising ParserError and swallowing exception # that caused read to fail. 
- handle = open(self.csv_shiftjs, "rb") codec = codecs.lookup("utf-8") utf8 = codecs.lookup('utf-8') - # stream must be binary UTF8 - stream = codecs.StreamRecoder( - handle, utf8.encode, utf8.decode, codec.streamreader, - codec.streamwriter) + if compat.PY3: msg = "'utf-8' codec can't decode byte" else: msg = "'utf8' codec can't decode byte" - with tm.assert_raises_regex(UnicodeDecodeError, msg): - self.read_csv(stream) - stream.close() + + # stream must be binary UTF8 + with open(self.csv_shiftjs, "rb") as handle, codecs.StreamRecoder( + handle, utf8.encode, utf8.decode, codec.streamreader, + codec.streamwriter) as stream: + + with tm.assert_raises_regex(UnicodeDecodeError, msg): + self.read_csv(stream) def test_read_csv(self): if not compat.PY3: @@ -76,7 +77,7 @@ def test_read_csv(self): else: prefix = u("file://") - fname = prefix + compat.text_type(self.csv1) + fname = prefix + compat.text_type(os.path.abspath(self.csv1)) self.read_csv(fname, index_col=0, parse_dates=True) def test_1000_sep(self): @@ -237,6 +238,21 @@ def test_csv_mixed_type(self): out = self.read_csv(StringIO(data)) tm.assert_frame_equal(out, expected) + def test_read_csv_low_memory_no_rows_with_index(self): + if self.engine == "c" and not self.low_memory: + pytest.skip("This is a low-memory specific test") + + # see gh-21141 + data = """A,B,C +1,1,1,2 +2,2,3,4 +3,3,4,5 +""" + out = self.read_csv(StringIO(data), low_memory=True, + index_col=0, nrows=0) + expected = DataFrame(columns=["A", "B", "C"]) + tm.assert_frame_equal(out, expected) + def test_read_csv_dataframe(self): df = self.read_csv(self.csv1, index_col=0, parse_dates=True) df2 = self.read_table(self.csv1, sep=',', index_col=0, @@ -635,21 +651,19 @@ def test_read_csv_parse_simple_list(self): tm.assert_frame_equal(df, expected) @tm.network - def test_url(self): + def test_url(self, datapath): # HTTP(S) url = ('https://raw.github.com/pandas-dev/pandas/master/' 'pandas/tests/io/parser/data/salaries.csv') url_table = 
self.read_table(url) - dirpath = tm.get_data_path() - localtable = os.path.join(dirpath, 'salaries.csv') + localtable = datapath('io', 'parser', 'data', 'salaries.csv') local_table = self.read_table(localtable) tm.assert_frame_equal(url_table, local_table) # TODO: ftp testing @pytest.mark.slow - def test_file(self): - dirpath = tm.get_data_path() - localtable = os.path.join(dirpath, 'salaries.csv') + def test_file(self, datapath): + localtable = datapath('io', 'parser', 'data', 'salaries.csv') local_table = self.read_table(localtable) try: @@ -739,8 +753,8 @@ def test_utf16_bom_skiprows(self): tm.assert_frame_equal(result, expected) - def test_utf16_example(self): - path = tm.get_data_path('utf16_ex.txt') + def test_utf16_example(self, datapath): + path = datapath('io', 'parser', 'data', 'utf16_ex.txt') # it works! and is the right length result = self.read_table(path, encoding='utf-16') @@ -751,8 +765,8 @@ def test_utf16_example(self): result = self.read_table(buf, encoding='utf-16') assert len(result) == 50 - def test_unicode_encoding(self): - pth = tm.get_data_path('unicode_series.csv') + def test_unicode_encoding(self, datapath): + pth = datapath('io', 'parser', 'data', 'unicode_series.csv') result = self.read_csv(pth, header=None, encoding='latin-1') result = result.set_index(0) @@ -1497,10 +1511,9 @@ def test_internal_eof_byte_to_file(self): result = self.read_csv(path) tm.assert_frame_equal(result, expected) - def test_sub_character(self): + def test_sub_character(self, datapath): # see gh-16893 - dirpath = tm.get_data_path() - filename = os.path.join(dirpath, "sub_char.csv") + filename = datapath('io', 'parser', 'data', 'sub_char.csv') expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"]) result = self.read_csv(filename) diff --git a/pandas/tests/io/parser/compression.py b/pandas/tests/io/parser/compression.py index 01c6620e50d37..e4950af19ea95 100644 --- a/pandas/tests/io/parser/compression.py +++ b/pandas/tests/io/parser/compression.py @@ -110,20 
+110,19 @@ def test_read_csv_infer_compression(self): # see gh-9770 expected = self.read_csv(self.csv1, index_col=0, parse_dates=True) - inputs = [self.csv1, self.csv1 + '.gz', - self.csv1 + '.bz2', open(self.csv1)] + with open(self.csv1) as f: + inputs = [self.csv1, self.csv1 + '.gz', + self.csv1 + '.bz2', f] - for f in inputs: - df = self.read_csv(f, index_col=0, parse_dates=True, - compression='infer') - - tm.assert_frame_equal(expected, df) + for inp in inputs: + df = self.read_csv(inp, index_col=0, parse_dates=True, + compression='infer') - inputs[3].close() + tm.assert_frame_equal(expected, df) - def test_read_csv_compressed_utf16_example(self): + def test_read_csv_compressed_utf16_example(self, datapath): # GH18071 - path = tm.get_data_path('utf16_ex_small.zip') + path = datapath('io', 'parser', 'data', 'utf16_ex_small.zip') result = self.read_csv(path, encoding='utf-16', compression='zip', sep='\t') diff --git a/pandas/tests/io/parser/dtypes.py b/pandas/tests/io/parser/dtypes.py index b91ce04673e29..8060ebf2fbcd4 100644 --- a/pandas/tests/io/parser/dtypes.py +++ b/pandas/tests/io/parser/dtypes.py @@ -125,9 +125,9 @@ def test_categorical_dtype_high_cardinality_numeric(self): np.sort(actual.a.cat.categories), ordered=True) tm.assert_frame_equal(actual, expected) - def test_categorical_dtype_encoding(self): + def test_categorical_dtype_encoding(self, datapath): # GH 10153 - pth = tm.get_data_path('unicode_series.csv') + pth = datapath('io', 'parser', 'data', 'unicode_series.csv') encoding = 'latin-1' expected = self.read_csv(pth, header=None, encoding=encoding) expected[1] = Categorical(expected[1]) @@ -135,7 +135,7 @@ def test_categorical_dtype_encoding(self): dtype={1: 'category'}) tm.assert_frame_equal(actual, expected) - pth = tm.get_data_path('utf16_ex.txt') + pth = datapath('io', 'parser', 'data', 'utf16_ex.txt') encoding = 'utf-16' expected = self.read_table(pth, encoding=encoding) expected = expected.apply(Categorical) diff --git 
a/pandas/tests/io/parser/na_values.py b/pandas/tests/io/parser/na_values.py index d2c3f82e95c4d..cc224efd533b7 100644 --- a/pandas/tests/io/parser/na_values.py +++ b/pandas/tests/io/parser/na_values.py @@ -369,3 +369,14 @@ def test_no_na_filter_on_index(self): expected = DataFrame({"a": [1, 4], "c": [3, 6]}, index=Index([np.nan, 5.0], name="b")) tm.assert_frame_equal(out, expected) + + def test_inf_na_values_with_int_index(self): + # see gh-17128 + data = "idx,col1,col2\n1,3,4\n2,inf,-inf" + + # Don't fail with OverflowError with infs and integer index column + out = self.read_csv(StringIO(data), index_col=[0], + na_values=['inf', '-inf']) + expected = DataFrame({"col1": [3, np.nan], "col2": [4, np.nan]}, + index=Index([1, 2], name="idx")) + tm.assert_frame_equal(out, expected) diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index fdf45f307e953..72d2c5fd8d18f 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -48,11 +48,19 @@ def check_compressed_urls(salaries_table, compression, extension, mode, tm.assert_frame_equal(url_table, salaries_table) +@pytest.fixture +def tips_df(datapath): + """DataFrame with the tips dataset.""" + return read_csv(datapath('io', 'parser', 'data', 'tips.csv')) + + @pytest.mark.usefixtures("s3_resource") +@td.skip_if_not_us_locale() class TestS3(object): - def test_parse_public_s3_bucket(self): + def test_parse_public_s3_bucket(self, tips_df): pytest.importorskip('s3fs') + # more of an integration test due to the not-public contents portion # can probably mock this though. 
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]: @@ -60,45 +68,40 @@ def test_parse_public_s3_bucket(self): ext, compression=comp) assert isinstance(df, DataFrame) assert not df.empty - tm.assert_frame_equal(read_csv( - tm.get_data_path('tips.csv')), df) + tm.assert_frame_equal(df, tips_df) # Read public file from bucket with not-public contents df = read_csv('s3://cant_get_it/tips.csv') assert isinstance(df, DataFrame) assert not df.empty - tm.assert_frame_equal(read_csv(tm.get_data_path('tips.csv')), df) + tm.assert_frame_equal(df, tips_df) - def test_parse_public_s3n_bucket(self): + def test_parse_public_s3n_bucket(self, tips_df): # Read from AWS s3 as "s3n" URL df = read_csv('s3n://pandas-test/tips.csv', nrows=10) assert isinstance(df, DataFrame) assert not df.empty - tm.assert_frame_equal(read_csv( - tm.get_data_path('tips.csv')).iloc[:10], df) + tm.assert_frame_equal(tips_df.iloc[:10], df) - def test_parse_public_s3a_bucket(self): + def test_parse_public_s3a_bucket(self, tips_df): # Read from AWS s3 as "s3a" URL df = read_csv('s3a://pandas-test/tips.csv', nrows=10) assert isinstance(df, DataFrame) assert not df.empty - tm.assert_frame_equal(read_csv( - tm.get_data_path('tips.csv')).iloc[:10], df) + tm.assert_frame_equal(tips_df.iloc[:10], df) - def test_parse_public_s3_bucket_nrows(self): + def test_parse_public_s3_bucket_nrows(self, tips_df): for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]: df = read_csv('s3://pandas-test/tips.csv' + ext, nrows=10, compression=comp) assert isinstance(df, DataFrame) assert not df.empty - tm.assert_frame_equal(read_csv( - tm.get_data_path('tips.csv')).iloc[:10], df) + tm.assert_frame_equal(tips_df.iloc[:10], df) - def test_parse_public_s3_bucket_chunked(self): + def test_parse_public_s3_bucket_chunked(self, tips_df): # Read with a chunksize chunksize = 5 - local_tips = read_csv(tm.get_data_path('tips.csv')) for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]: df_reader = 
read_csv('s3://pandas-test/tips.csv' + ext, chunksize=chunksize, compression=comp) @@ -109,14 +112,13 @@ def test_parse_public_s3_bucket_chunked(self): df = df_reader.get_chunk() assert isinstance(df, DataFrame) assert not df.empty - true_df = local_tips.iloc[ + true_df = tips_df.iloc[ chunksize * i_chunk: chunksize * (i_chunk + 1)] tm.assert_frame_equal(true_df, df) - def test_parse_public_s3_bucket_chunked_python(self): + def test_parse_public_s3_bucket_chunked_python(self, tips_df): # Read with a chunksize using the Python parser chunksize = 5 - local_tips = read_csv(tm.get_data_path('tips.csv')) for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]: df_reader = read_csv('s3://pandas-test/tips.csv' + ext, chunksize=chunksize, compression=comp, @@ -127,36 +129,33 @@ def test_parse_public_s3_bucket_chunked_python(self): df = df_reader.get_chunk() assert isinstance(df, DataFrame) assert not df.empty - true_df = local_tips.iloc[ + true_df = tips_df.iloc[ chunksize * i_chunk: chunksize * (i_chunk + 1)] tm.assert_frame_equal(true_df, df) - def test_parse_public_s3_bucket_python(self): + def test_parse_public_s3_bucket_python(self, tips_df): for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]: df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python', compression=comp) assert isinstance(df, DataFrame) assert not df.empty - tm.assert_frame_equal(read_csv( - tm.get_data_path('tips.csv')), df) + tm.assert_frame_equal(df, tips_df) - def test_infer_s3_compression(self): + def test_infer_s3_compression(self, tips_df): for ext in ['', '.gz', '.bz2']: df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python', compression='infer') assert isinstance(df, DataFrame) assert not df.empty - tm.assert_frame_equal(read_csv( - tm.get_data_path('tips.csv')), df) + tm.assert_frame_equal(df, tips_df) - def test_parse_public_s3_bucket_nrows_python(self): + def test_parse_public_s3_bucket_nrows_python(self, tips_df): for ext, comp in [('', None), ('.gz', 
'gzip'), ('.bz2', 'bz2')]: df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python', nrows=10, compression=comp) assert isinstance(df, DataFrame) assert not df.empty - tm.assert_frame_equal(read_csv( - tm.get_data_path('tips.csv')).iloc[:10], df) + tm.assert_frame_equal(tips_df.iloc[:10], df) def test_s3_fails(self): with pytest.raises(IOError): diff --git a/pandas/tests/io/parser/test_parsers.py b/pandas/tests/io/parser/test_parsers.py index 7717102b64fc5..b6f13039641a2 100644 --- a/pandas/tests/io/parser/test_parsers.py +++ b/pandas/tests/io/parser/test_parsers.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- import os +import pytest import pandas.util.testing as tm from pandas import read_csv, read_table, DataFrame @@ -45,8 +46,9 @@ def read_table(self, *args, **kwargs): def float_precision_choices(self): raise com.AbstractMethodError(self) - def setup_method(self, method): - self.dirpath = tm.get_data_path() + @pytest.fixture(autouse=True) + def setup_method(self, datapath): + self.dirpath = datapath('io', 'parser', 'data') self.csv1 = os.path.join(self.dirpath, 'test1.csv') self.csv2 = os.path.join(self.dirpath, 'test2.csv') self.xls1 = os.path.join(self.dirpath, 'test.xls') diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py index ab4c14034cd20..c7026e3e0fc88 100644 --- a/pandas/tests/io/parser/test_textreader.py +++ b/pandas/tests/io/parser/test_textreader.py @@ -28,31 +28,26 @@ class TestTextReader(object): - def setup_method(self, method): - self.dirpath = tm.get_data_path() + @pytest.fixture(autouse=True) + def setup_method(self, datapath): + self.dirpath = datapath('io', 'parser', 'data') self.csv1 = os.path.join(self.dirpath, 'test1.csv') self.csv2 = os.path.join(self.dirpath, 'test2.csv') self.xls1 = os.path.join(self.dirpath, 'test.xls') def test_file_handle(self): - try: - f = open(self.csv1, 'rb') + with open(self.csv1, 'rb') as f: reader = TextReader(f) - result = reader.read() # noqa - finally: - 
f.close() + reader.read() def test_string_filename(self): reader = TextReader(self.csv1, header=None) reader.read() def test_file_handle_mmap(self): - try: - f = open(self.csv1, 'rb') + with open(self.csv1, 'rb') as f: reader = TextReader(f, memory_map=True, header=None) reader.read() - finally: - f.close() def test_StringIO(self): with open(self.csv1, 'rb') as f: diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py index 5da347e47957c..101ee3e619f5b 100644 --- a/pandas/tests/io/sas/test_sas7bdat.py +++ b/pandas/tests/io/sas/test_sas7bdat.py @@ -11,8 +11,9 @@ class TestSAS7BDAT(object): - def setup_method(self, method): - self.dirpath = tm.get_data_path() + @pytest.fixture(autouse=True) + def setup_method(self, datapath): + self.dirpath = datapath("io", "sas", "data") self.data = [] self.test_ix = [list(range(1, 16)), [16]] for j in 1, 2: @@ -123,9 +124,8 @@ def test_iterator_read_too_much(self): rdr.close() -def test_encoding_options(): - dirpath = tm.get_data_path() - fname = os.path.join(dirpath, "test1.sas7bdat") +def test_encoding_options(datapath): + fname = datapath("io", "sas", "data", "test1.sas7bdat") df1 = pd.read_sas(fname) df2 = pd.read_sas(fname, encoding='utf-8') for col in df1.columns: @@ -143,51 +143,48 @@ def test_encoding_options(): assert(x == y.decode()) -def test_productsales(): - dirpath = tm.get_data_path() - fname = os.path.join(dirpath, "productsales.sas7bdat") +def test_productsales(datapath): + fname = datapath("io", "sas", "data", "productsales.sas7bdat") df = pd.read_sas(fname, encoding='utf-8') - fname = os.path.join(dirpath, "productsales.csv") + fname = datapath("io", "sas", "data", "productsales.csv") df0 = pd.read_csv(fname, parse_dates=['MONTH']) vn = ["ACTUAL", "PREDICT", "QUARTER", "YEAR"] df0[vn] = df0[vn].astype(np.float64) tm.assert_frame_equal(df, df0) -def test_12659(): - dirpath = tm.get_data_path() - fname = os.path.join(dirpath, "test_12659.sas7bdat") +def test_12659(datapath): + 
fname = datapath("io", "sas", "data", "test_12659.sas7bdat") df = pd.read_sas(fname) - fname = os.path.join(dirpath, "test_12659.csv") + fname = datapath("io", "sas", "data", "test_12659.csv") df0 = pd.read_csv(fname) df0 = df0.astype(np.float64) tm.assert_frame_equal(df, df0) -def test_airline(): - dirpath = tm.get_data_path() - fname = os.path.join(dirpath, "airline.sas7bdat") +def test_airline(datapath): + fname = datapath("io", "sas", "data", "airline.sas7bdat") df = pd.read_sas(fname) - fname = os.path.join(dirpath, "airline.csv") + fname = datapath("io", "sas", "data", "airline.csv") df0 = pd.read_csv(fname) df0 = df0.astype(np.float64) tm.assert_frame_equal(df, df0, check_exact=False) -def test_date_time(): +def test_date_time(datapath): # Support of different SAS date/datetime formats (PR #15871) - dirpath = tm.get_data_path() - fname = os.path.join(dirpath, "datetime.sas7bdat") + fname = datapath("io", "sas", "data", "datetime.sas7bdat") df = pd.read_sas(fname) - fname = os.path.join(dirpath, "datetime.csv") + fname = datapath("io", "sas", "data", "datetime.csv") df0 = pd.read_csv(fname, parse_dates=['Date1', 'Date2', 'DateTime', 'DateTimeHi', 'Taiw']) + # GH 19732: Timestamps imported from sas will incur floating point errors + df.iloc[:, 3] = df.iloc[:, 3].dt.round('us') tm.assert_frame_equal(df, df0) -def test_zero_variables(): +def test_zero_variables(datapath): # Check if the SAS file has zero variables (PR #18184) - dirpath = tm.get_data_path() - fname = os.path.join(dirpath, "zero_variables.sas7bdat") + fname = datapath("io", "sas", "data", "zero_variables.sas7bdat") with pytest.raises(EmptyDataError): pd.read_sas(fname) diff --git a/pandas/tests/io/sas/test_xport.py b/pandas/tests/io/sas/test_xport.py index de31c3e36a8d5..6e5b2ab067aa5 100644 --- a/pandas/tests/io/sas/test_xport.py +++ b/pandas/tests/io/sas/test_xport.py @@ -1,3 +1,4 @@ +import pytest import pandas as pd import pandas.util.testing as tm from pandas.io.sas.sasreader import read_sas 
@@ -18,8 +19,9 @@ def numeric_as_float(data): class TestXport(object): - def setup_method(self, method): - self.dirpath = tm.get_data_path() + @pytest.fixture(autouse=True) + def setup_method(self, datapath): + self.dirpath = datapath("io", "sas", "data") self.file01 = os.path.join(self.dirpath, "DEMO_G.xpt") self.file02 = os.path.join(self.dirpath, "SSHSV1_A.xpt") self.file03 = os.path.join(self.dirpath, "DRXFCD_G.xpt") diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py index 98c0effabec84..a6b331685e72a 100644 --- a/pandas/tests/io/test_clipboard.py +++ b/pandas/tests/io/test_clipboard.py @@ -9,10 +9,11 @@ from pandas import DataFrame from pandas import read_clipboard from pandas import get_option +from pandas.compat import PY2 from pandas.util import testing as tm from pandas.util.testing import makeCustomDataframe as mkdf from pandas.io.clipboard.exceptions import PyperclipException -from pandas.io.clipboard import clipboard_set +from pandas.io.clipboard import clipboard_set, clipboard_get try: @@ -22,73 +23,120 @@ _DEPS_INSTALLED = 0 +def build_kwargs(sep, excel): + kwargs = {} + if excel != 'default': + kwargs['excel'] = excel + if sep != 'default': + kwargs['sep'] = sep + return kwargs + + +@pytest.fixture(params=['delims', 'utf8', 'string', 'long', 'nonascii', + 'colwidth', 'mixed', 'float', 'int']) +def df(request): + data_type = request.param + + if data_type == 'delims': + return pd.DataFrame({'a': ['"a,\t"b|c', 'd\tef´'], + 'b': ['hi\'j', 'k\'\'lm']}) + elif data_type == 'utf8': + return pd.DataFrame({'a': ['µasd', 'Ωœ∑´'], + 'b': ['øπ∆˚¬', 'œ∑´®']}) + elif data_type == 'string': + return mkdf(5, 3, c_idx_type='s', r_idx_type='i', + c_idx_names=[None], r_idx_names=[None]) + elif data_type == 'long': + max_rows = get_option('display.max_rows') + return mkdf(max_rows + 1, 3, + data_gen_f=lambda *args: randint(2), + c_idx_type='s', r_idx_type='i', + c_idx_names=[None], r_idx_names=[None]) + elif data_type == 'nonascii': + 
return pd.DataFrame({'en': 'in English'.split(), + 'es': 'en español'.split()}) + elif data_type == 'colwidth': + _cw = get_option('display.max_colwidth') + 1 + return mkdf(5, 3, data_gen_f=lambda *args: 'x' * _cw, + c_idx_type='s', r_idx_type='i', + c_idx_names=[None], r_idx_names=[None]) + elif data_type == 'mixed': + return DataFrame({'a': np.arange(1.0, 6.0) + 0.01, + 'b': np.arange(1, 6), + 'c': list('abcde')}) + elif data_type == 'float': + return mkdf(5, 3, data_gen_f=lambda r, c: float(r) + 0.01, + c_idx_type='s', r_idx_type='i', + c_idx_names=[None], r_idx_names=[None]) + elif data_type == 'int': + return mkdf(5, 3, data_gen_f=lambda *args: randint(2), + c_idx_type='s', r_idx_type='i', + c_idx_names=[None], r_idx_names=[None]) + else: + raise ValueError + + @pytest.mark.single @pytest.mark.skipif(not _DEPS_INSTALLED, reason="clipboard primitives not installed") class TestClipboard(object): - - @classmethod - def setup_class(cls): - cls.data = {} - cls.data['string'] = mkdf(5, 3, c_idx_type='s', r_idx_type='i', - c_idx_names=[None], r_idx_names=[None]) - cls.data['int'] = mkdf(5, 3, data_gen_f=lambda *args: randint(2), - c_idx_type='s', r_idx_type='i', - c_idx_names=[None], r_idx_names=[None]) - cls.data['float'] = mkdf(5, 3, - data_gen_f=lambda r, c: float(r) + 0.01, - c_idx_type='s', r_idx_type='i', - c_idx_names=[None], r_idx_names=[None]) - cls.data['mixed'] = DataFrame({'a': np.arange(1.0, 6.0) + 0.01, - 'b': np.arange(1, 6), - 'c': list('abcde')}) - - # Test columns exceeding "max_colwidth" (GH8305) - _cw = get_option('display.max_colwidth') + 1 - cls.data['colwidth'] = mkdf(5, 3, data_gen_f=lambda *args: 'x' * _cw, - c_idx_type='s', r_idx_type='i', - c_idx_names=[None], r_idx_names=[None]) - # Test GH-5346 - max_rows = get_option('display.max_rows') - cls.data['longdf'] = mkdf(max_rows + 1, 3, - data_gen_f=lambda *args: randint(2), - c_idx_type='s', r_idx_type='i', - c_idx_names=[None], r_idx_names=[None]) - # Test for non-ascii text: GH9263 - 
cls.data['nonascii'] = pd.DataFrame({'en': 'in English'.split(), - 'es': 'en español'.split()}) - # unicode round trip test for GH 13747, GH 12529 - cls.data['utf8'] = pd.DataFrame({'a': ['µasd', 'Ωœ∑´'], - 'b': ['øπ∆˚¬', 'œ∑´®']}) - cls.data_types = list(cls.data.keys()) - - @classmethod - def teardown_class(cls): - del cls.data_types, cls.data - - def check_round_trip_frame(self, data_type, excel=None, sep=None, + def check_round_trip_frame(self, data, excel=None, sep=None, encoding=None): - data = self.data[data_type] data.to_clipboard(excel=excel, sep=sep, encoding=encoding) - if sep is not None: - result = read_clipboard(sep=sep, index_col=0, encoding=encoding) - else: - result = read_clipboard(encoding=encoding) + result = read_clipboard(sep=sep or '\t', index_col=0, + encoding=encoding) tm.assert_frame_equal(data, result, check_dtype=False) - def test_round_trip_frame_sep(self): - for dt in self.data_types: - self.check_round_trip_frame(dt, sep=',') - self.check_round_trip_frame(dt, sep=r'\s+') - self.check_round_trip_frame(dt, sep='|') - - def test_round_trip_frame_string(self): - for dt in self.data_types: - self.check_round_trip_frame(dt, excel=False) - - def test_round_trip_frame(self): - for dt in self.data_types: - self.check_round_trip_frame(dt) + # Test that default arguments copy as tab delimited + def test_round_trip_frame(self, df): + self.check_round_trip_frame(df) + + # Test that explicit delimiters are respected + @pytest.mark.parametrize('sep', ['\t', ',', '|']) + def test_round_trip_frame_sep(self, df, sep): + self.check_round_trip_frame(df, sep=sep) + + # Test white space separator + def test_round_trip_frame_string(self, df): + df.to_clipboard(excel=False, sep=None) + result = read_clipboard() + assert df.to_string() == result.to_string() + assert df.shape == result.shape + + # Two character separator is not supported in to_clipboard + # Test that multi-character separators are not silently passed + def test_excel_sep_warning(self, df): + 
with tm.assert_produces_warning(): + df.to_clipboard(excel=True, sep=r'\t') + + # Separator is ignored when excel=False and should produce a warning + def test_copy_delim_warning(self, df): + with tm.assert_produces_warning(): + df.to_clipboard(excel=False, sep='\t') + + # Tests that the default behavior of to_clipboard is tab + # delimited and excel="True" + @pytest.mark.parametrize('sep', ['\t', None, 'default']) + @pytest.mark.parametrize('excel', [True, None, 'default']) + def test_clipboard_copy_tabs_default(self, sep, excel, df): + kwargs = build_kwargs(sep, excel) + df.to_clipboard(**kwargs) + if PY2: + # to_clipboard copies unicode, to_csv produces bytes. This is + # expected behavior + assert clipboard_get().encode('utf-8') == df.to_csv(sep='\t') + else: + assert clipboard_get() == df.to_csv(sep='\t') + + # Tests reading of white space separated tables + @pytest.mark.parametrize('sep', [None, 'default']) + @pytest.mark.parametrize('excel', [False]) + def test_clipboard_copy_strings(self, sep, excel, df): + kwargs = build_kwargs(sep, excel) + df.to_clipboard(**kwargs) + result = read_clipboard(sep=r'\s+') + assert result.to_string() == df.to_string() + assert df.shape == result.shape def test_read_clipboard_infer_excel(self): # gh-19010: avoid warnings @@ -124,15 +172,13 @@ def test_read_clipboard_infer_excel(self): tm.assert_frame_equal(res, exp) - def test_invalid_encoding(self): + def test_invalid_encoding(self, df): # test case for testing invalid encoding - data = self.data['string'] with pytest.raises(ValueError): - data.to_clipboard(encoding='ascii') + df.to_clipboard(encoding='ascii') with pytest.raises(NotImplementedError): pd.read_clipboard(encoding='ascii') - def test_round_trip_valid_encodings(self): - for enc in ['UTF-8', 'utf-8', 'utf8']: - for dt in self.data_types: - self.check_round_trip_frame(dt, encoding=enc) + @pytest.mark.parametrize('enc', ['UTF-8', 'utf-8', 'utf8']) + def test_round_trip_valid_encodings(self, enc, df): + 
self.check_round_trip_frame(df, encoding=enc) diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index a89156db38ae3..5c9739be73393 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -149,27 +149,22 @@ def test_read_non_existant(self, reader, module, error_class, fn_ext): reader(path) @pytest.mark.parametrize('reader, module, path', [ - (pd.read_csv, 'os', os.path.join(HERE, 'data', 'iris.csv')), - (pd.read_table, 'os', os.path.join(HERE, 'data', 'iris.csv')), - (pd.read_fwf, 'os', os.path.join(HERE, 'data', - 'fixed_width_format.txt')), - (pd.read_excel, 'xlrd', os.path.join(HERE, 'data', 'test1.xlsx')), - (pd.read_feather, 'feather', os.path.join(HERE, 'data', - 'feather-0_3_1.feather')), - (pd.read_hdf, 'tables', os.path.join(HERE, 'data', 'legacy_hdf', - 'datetimetz_object.h5')), - (pd.read_stata, 'os', os.path.join(HERE, 'data', 'stata10_115.dta')), - (pd.read_sas, 'os', os.path.join(HERE, 'sas', 'data', - 'test1.sas7bdat')), - (pd.read_json, 'os', os.path.join(HERE, 'json', 'data', - 'tsframe_v012.json')), - (pd.read_msgpack, 'os', os.path.join(HERE, 'msgpack', 'data', - 'frame.mp')), - (pd.read_pickle, 'os', os.path.join(HERE, 'data', - 'categorical_0_14_1.pickle')), + (pd.read_csv, 'os', ('io', 'data', 'iris.csv')), + (pd.read_table, 'os', ('io', 'data', 'iris.csv')), + (pd.read_fwf, 'os', ('io', 'data', 'fixed_width_format.txt')), + (pd.read_excel, 'xlrd', ('io', 'data', 'test1.xlsx')), + (pd.read_feather, 'feather', ('io', 'data', 'feather-0_3_1.feather')), + (pd.read_hdf, 'tables', ('io', 'data', 'legacy_hdf', + 'datetimetz_object.h5')), + (pd.read_stata, 'os', ('io', 'data', 'stata10_115.dta')), + (pd.read_sas, 'os', ('io', 'sas', 'data', 'test1.sas7bdat')), + (pd.read_json, 'os', ('io', 'json', 'data', 'tsframe_v012.json')), + (pd.read_msgpack, 'os', ('io', 'msgpack', 'data', 'frame.mp')), + (pd.read_pickle, 'os', ('io', 'data', 'categorical_0_14_1.pickle')), ]) - def test_read_fspath_all(self, 
reader, module, path): + def test_read_fspath_all(self, reader, module, path, datapath): pytest.importorskip(module) + path = datapath(*path) mypath = CustomFSPath(path) result = reader(mypath) @@ -232,13 +227,14 @@ def test_write_fspath_hdf5(self): tm.assert_frame_equal(result, expected) -class TestMMapWrapper(object): +@pytest.fixture +def mmap_file(datapath): + return datapath('io', 'data', 'test_mmap.csv') + - def setup_method(self, method): - self.mmap_file = os.path.join(tm.get_data_path(), - 'test_mmap.csv') +class TestMMapWrapper(object): - def test_constructor_bad_file(self): + def test_constructor_bad_file(self, mmap_file): non_file = StringIO('I am not a file') non_file.fileno = lambda: -1 @@ -252,15 +248,15 @@ def test_constructor_bad_file(self): tm.assert_raises_regex(err, msg, common.MMapWrapper, non_file) - target = open(self.mmap_file, 'r') + target = open(mmap_file, 'r') target.close() msg = "I/O operation on closed file" tm.assert_raises_regex( ValueError, msg, common.MMapWrapper, target) - def test_get_attr(self): - with open(self.mmap_file, 'r') as target: + def test_get_attr(self, mmap_file): + with open(mmap_file, 'r') as target: wrapper = common.MMapWrapper(target) attrs = dir(wrapper.mmap) @@ -273,8 +269,8 @@ def test_get_attr(self): assert not hasattr(wrapper, 'foo') - def test_next(self): - with open(self.mmap_file, 'r') as target: + def test_next(self, mmap_file): + with open(mmap_file, 'r') as target: wrapper = common.MMapWrapper(target) lines = target.readlines() diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index 05423474f330a..20f403e71fd36 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -39,8 +39,9 @@ @td.skip_if_no('xlrd', '0.9') class SharedItems(object): - def setup_method(self, method): - self.dirpath = tm.get_data_path() + @pytest.fixture(autouse=True) + def setup_method(self, datapath): + self.dirpath = datapath("io", "data") self.frame = _frame.copy() self.frame2 = 
_frame2.copy() self.tsframe = _tsframe.copy() @@ -49,7 +50,6 @@ def setup_method(self, method): def get_csv_refdf(self, basename): """ Obtain the reference data from read_csv with the Python engine. - Test data path is defined by pandas.util.testing.get_data_path() Parameters ---------- @@ -68,8 +68,7 @@ def get_csv_refdf(self, basename): def get_excelfile(self, basename, ext): """ - Return test data ExcelFile instance. Test data path is defined by - pandas.util.testing.get_data_path() + Return test data ExcelFile instance. Parameters ---------- @@ -86,8 +85,7 @@ def get_excelfile(self, basename, ext): def get_exceldf(self, basename, ext, *args, **kwds): """ - Return test data DataFrame. Test data path is defined by - pandas.util.testing.get_data_path() + Return test data DataFrame. Parameters ---------- @@ -578,6 +576,7 @@ def test_read_from_http_url(self, ext): tm.assert_frame_equal(url_table, local_table) @td.skip_if_no('s3fs') + @td.skip_if_not_us_locale def test_read_from_s3_url(self, ext): boto3 = pytest.importorskip('boto3') moto = pytest.importorskip('moto') diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index a56946b82b027..9c6a8de7ed446 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -1,6 +1,5 @@ from __future__ import print_function -import glob import os import re import threading @@ -25,8 +24,18 @@ import pandas.util._test_decorators as td from pandas.util.testing import makeCustomDataframe as mkdf, network +HERE = os.path.dirname(__file__) -DATA_PATH = tm.get_data_path() + +@pytest.fixture(params=[ + 'chinese_utf-16.html', + 'chinese_utf-32.html', + 'chinese_utf-8.html', + 'letz_latin1.html', +]) +def html_encoding_file(request, datapath): + """Parametrized fixture for HTML encoding test filenames.""" + return datapath('io', 'data', 'html_encoding', request.param) def assert_framelist_equal(list1, list2, *args, **kwargs): @@ -44,11 +53,11 @@ def assert_framelist_equal(list1, list2, *args, 
**kwargs): @td.skip_if_no('bs4') -def test_bs4_version_fails(monkeypatch): +def test_bs4_version_fails(monkeypatch, datapath): import bs4 monkeypatch.setattr(bs4, '__version__', '4.2') with tm.assert_raises_regex(ValueError, "minimum version"): - read_html(os.path.join(DATA_PATH, "spam.html"), flavor='bs4') + read_html(datapath("io", "data", "spam.html"), flavor='bs4') def test_invalid_flavor(): @@ -59,8 +68,8 @@ def test_invalid_flavor(): @td.skip_if_no('bs4') @td.skip_if_no('lxml') -def test_same_ordering(): - filename = os.path.join(DATA_PATH, 'valid_markup.html') +def test_same_ordering(datapath): + filename = datapath('io', 'data', 'valid_markup.html') dfs_lxml = read_html(filename, index_col=0, flavor=['lxml']) dfs_bs4 = read_html(filename, index_col=0, flavor=['bs4']) assert_framelist_equal(dfs_lxml, dfs_bs4) @@ -72,11 +81,14 @@ def test_same_ordering(): pytest.param('lxml', marks=pytest.mark.skipif( not td.safe_import('lxml'), reason='No lxml'))], scope="class") class TestReadHtml(object): - spam_data = os.path.join(DATA_PATH, 'spam.html') - spam_data_kwargs = {} - if PY3: - spam_data_kwargs['encoding'] = 'UTF-8' - banklist_data = os.path.join(DATA_PATH, 'banklist.html') + + @pytest.fixture(autouse=True) + def set_files(self, datapath): + self.spam_data = datapath('io', 'data', 'spam.html') + self.spam_data_kwargs = {} + if PY3: + self.spam_data_kwargs['encoding'] = 'UTF-8' + self.banklist_data = datapath("io", "data", "banklist.html") @pytest.fixture(autouse=True, scope="function") def set_defaults(self, flavor, request): @@ -272,7 +284,8 @@ def test_invalid_url(self): @pytest.mark.slow def test_file_url(self): url = self.banklist_data - dfs = self.read_html(file_path_to_url(url), 'First', + dfs = self.read_html(file_path_to_url(os.path.abspath(url)), + 'First', attrs={'id': 'table'}) assert isinstance(dfs, list) for df in dfs: @@ -326,7 +339,7 @@ def test_multiindex_header_index_skiprows(self): @pytest.mark.slow def test_regex_idempotency(self): url = 
self.banklist_data - dfs = self.read_html(file_path_to_url(url), + dfs = self.read_html(file_path_to_url(os.path.abspath(url)), match=re.compile(re.compile('Florida')), attrs={'id': 'table'}) assert isinstance(dfs, list) @@ -352,9 +365,9 @@ def test_python_docs_table(self): assert sorted(zz) == sorted(['Repo', 'What']) @pytest.mark.slow - def test_thousands_macau_stats(self): + def test_thousands_macau_stats(self, datapath): all_non_nan_table_index = -2 - macau_data = os.path.join(DATA_PATH, 'macau.html') + macau_data = datapath("io", "data", "macau.html") dfs = self.read_html(macau_data, index_col=0, attrs={'class': 'style1'}) df = dfs[all_non_nan_table_index] @@ -362,9 +375,9 @@ def test_thousands_macau_stats(self): assert not any(s.isna().any() for _, s in df.iteritems()) @pytest.mark.slow - def test_thousands_macau_index_col(self): + def test_thousands_macau_index_col(self, datapath): all_non_nan_table_index = -2 - macau_data = os.path.join(DATA_PATH, 'macau.html') + macau_data = datapath('io', 'data', 'macau.html') dfs = self.read_html(macau_data, index_col=0, header=0) df = dfs[all_non_nan_table_index] @@ -518,8 +531,8 @@ def test_countries_municipalities(self): res2 = self.read_html(data2, header=0) assert_framelist_equal(res1, res2) - def test_nyse_wsj_commas_table(self): - data = os.path.join(DATA_PATH, 'nyse_wsj.html') + def test_nyse_wsj_commas_table(self, datapath): + data = datapath('io', 'data', 'nyse_wsj.html') df = self.read_html(data, index_col=0, header=0, attrs={'class': 'mdcTable'})[0] @@ -530,7 +543,7 @@ def test_nyse_wsj_commas_table(self): tm.assert_index_equal(df.columns, columns) @pytest.mark.slow - def test_banklist_header(self): + def test_banklist_header(self, datapath): from pandas.io.html import _remove_whitespace def try_remove_ws(x): @@ -541,7 +554,7 @@ def try_remove_ws(x): df = self.read_html(self.banklist_data, 'Metcalf', attrs={'id': 'table'})[0] - ground_truth = read_csv(os.path.join(DATA_PATH, 'banklist.csv'), + ground_truth = 
read_csv(datapath('io', 'data', 'banklist.csv'), converters={'Updated Date': Timestamp, 'Closing Date': Timestamp}) assert df.shape == ground_truth.shape @@ -658,19 +671,19 @@ def test_parse_dates_combine(self): newdf = DataFrame({'datetime': raw_dates}) tm.assert_frame_equal(newdf, res[0]) - def test_computer_sales_page(self): - data = os.path.join(DATA_PATH, 'computer_sales_page.html') + def test_computer_sales_page(self, datapath): + data = datapath('io', 'data', 'computer_sales_page.html') with tm.assert_raises_regex(ParserError, r"Passed header=\[0,1\] are " r"too many rows for this " r"multi_index of columns"): self.read_html(data, header=[0, 1]) - data = os.path.join(DATA_PATH, 'computer_sales_page.html') + data = datapath('io', 'data', 'computer_sales_page.html') assert self.read_html(data, header=[1, 2]) - def test_wikipedia_states_table(self): - data = os.path.join(DATA_PATH, 'wikipedia_states.html') + def test_wikipedia_states_table(self, datapath): + data = datapath('io', 'data', 'wikipedia_states.html') assert os.path.isfile(data), '%r is not a file' % data assert os.path.getsize(data), '%r is an empty file' % data result = self.read_html(data, 'Arizona', header=1)[0] @@ -784,15 +797,15 @@ def test_multiple_header_rows(self): html_df = read_html(html, )[0] tm.assert_frame_equal(expected_df, html_df) - def test_works_on_valid_markup(self): - filename = os.path.join(DATA_PATH, 'valid_markup.html') + def test_works_on_valid_markup(self, datapath): + filename = datapath('io', 'data', 'valid_markup.html') dfs = self.read_html(filename, index_col=0) assert isinstance(dfs, list) assert isinstance(dfs[0], DataFrame) @pytest.mark.slow - def test_fallback_success(self): - banklist_data = os.path.join(DATA_PATH, 'banklist.html') + def test_fallback_success(self, datapath): + banklist_data = datapath('io', 'data', 'banklist.html') self.read_html(banklist_data, '.*Water.*', flavor=['lxml', 'html5lib']) def test_to_html_timestamp(self): @@ -835,22 +848,23 @@ def 
test_displayed_only(self, displayed_only, exp0, exp1): else: assert len(dfs) == 1 # Should not parse hidden table - @pytest.mark.parametrize("f", glob.glob( - os.path.join(DATA_PATH, 'html_encoding', '*.html'))) - def test_encode(self, f): - _, encoding = os.path.splitext(os.path.basename(f))[0].split('_') + def test_encode(self, html_encoding_file): + _, encoding = os.path.splitext( + os.path.basename(html_encoding_file) + )[0].split('_') try: - with open(f, 'rb') as fobj: + with open(html_encoding_file, 'rb') as fobj: from_string = self.read_html(fobj.read(), encoding=encoding, index_col=0).pop() - with open(f, 'rb') as fobj: + with open(html_encoding_file, 'rb') as fobj: from_file_like = self.read_html(BytesIO(fobj.read()), encoding=encoding, index_col=0).pop() - from_filename = self.read_html(f, encoding=encoding, + from_filename = self.read_html(html_encoding_file, + encoding=encoding, index_col=0).pop() tm.assert_frame_equal(from_string, from_file_like) tm.assert_frame_equal(from_string, from_filename) @@ -906,7 +920,7 @@ def seekable(self): assert self.read_html(bad) @pytest.mark.slow - def test_importcheck_thread_safety(self): + def test_importcheck_thread_safety(self, datapath): # see gh-16928 class ErrorThread(threading.Thread): @@ -921,7 +935,7 @@ def run(self): # force import check by reinitalising global vars in html.py reload(pandas.io.html) - filename = os.path.join(DATA_PATH, 'valid_markup.html') + filename = datapath('io', 'data', 'valid_markup.html') helper_thread1 = ErrorThread(target=self.read_html, args=(filename,)) helper_thread2 = ErrorThread(target=self.read_html, args=(filename,)) diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py index cfac77291803d..491d5fe33cc33 100644 --- a/pandas/tests/io/test_packers.py +++ b/pandas/tests/io/test_packers.py @@ -3,6 +3,7 @@ from warnings import catch_warnings import os import datetime +import glob import numpy as np from distutils.version import LooseVersion @@ -837,13 
+838,13 @@ def test_default_encoding(self): assert_frame_equal(result, frame) -def legacy_packers_versions(): - # yield the packers versions - path = tm.get_data_path('legacy_msgpack') - for v in os.listdir(path): - p = os.path.join(path, v) - if os.path.isdir(p): - yield v +files = glob.glob(os.path.join(os.path.dirname(__file__), "data", + "legacy_msgpack", "*", "*.msgpack")) + + +@pytest.fixture(params=files) +def legacy_packer(request, datapath): + return datapath(request.param) class TestMsgpack(object): @@ -920,24 +921,20 @@ def compare_frame_dt_mixed_tzs(self, result, expected, typ, version): else: tm.assert_frame_equal(result, expected) - @pytest.mark.parametrize('version', legacy_packers_versions()) def test_msgpacks_legacy(self, current_packers_data, all_packers_data, - version): - - pth = tm.get_data_path('legacy_msgpack/{0}'.format(version)) - n = 0 - for f in os.listdir(pth): - # GH12142 0.17 files packed in P2 can't be read in P3 - if (compat.PY3 and version.startswith('0.17.') and - f.split('.')[-4][-1] == '2'): - continue - vf = os.path.join(pth, f) - try: - with catch_warnings(record=True): - self.compare(current_packers_data, all_packers_data, - vf, version) - except ImportError: - # blosc not installed - continue - n += 1 - assert n > 0, 'Msgpack files are not tested' + legacy_packer, datapath): + + version = os.path.basename(os.path.dirname(legacy_packer)) + + # GH12142 0.17 files packed in P2 can't be read in P3 + if (compat.PY3 and version.startswith('0.17.') and + legacy_packer.split('.')[-4][-1] == '2'): + msg = "Files packed in Py2 can't be read in Py3 ({})" + pytest.skip(msg.format(version)) + try: + with catch_warnings(record=True): + self.compare(current_packers_data, all_packers_data, + legacy_packer, version) + except ImportError: + # blosc not installed + pass diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index fbe2174e603e2..45cbbd43cd6a8 100644 --- a/pandas/tests/io/test_pickle.py +++ 
b/pandas/tests/io/test_pickle.py @@ -12,7 +12,7 @@ 3. Move the created pickle to "data/legacy_pickle/<version>" directory. """ - +import glob import pytest from warnings import catch_warnings @@ -184,27 +184,25 @@ def compare_sp_frame_float(result, expected, typ, version): tm.assert_sp_frame_equal(result, expected) +files = glob.glob(os.path.join(os.path.dirname(__file__), "data", + "legacy_pickle", "*", "*.pickle")) + + +@pytest.fixture(params=files) +def legacy_pickle(request, datapath): + return datapath(request.param) + + # --------------------- # tests # --------------------- -def legacy_pickle_versions(): - # yield the pickle versions - path = tm.get_data_path('legacy_pickle') - for v in os.listdir(path): - p = os.path.join(path, v) - if os.path.isdir(p): - for f in os.listdir(p): - yield (v, f) - - -@pytest.mark.parametrize('version, f', legacy_pickle_versions()) -def test_pickles(current_pickle_data, version, f): +def test_pickles(current_pickle_data, legacy_pickle): if not is_platform_little_endian(): pytest.skip("known failure on non-little endian") - vf = tm.get_data_path('legacy_pickle/{}/{}'.format(version, f)) + version = os.path.basename(os.path.dirname(legacy_pickle)) with catch_warnings(record=True): - compare(current_pickle_data, vf, version) + compare(current_pickle_data, legacy_pickle, version) def test_round_trip_current(current_pickle_data): @@ -260,12 +258,11 @@ def python_unpickler(path): compare_element(result, expected, typ) -def test_pickle_v0_14_1(): +def test_pickle_v0_14_1(datapath): cat = pd.Categorical(values=['a', 'b', 'c'], ordered=False, categories=['a', 'b', 'c', 'd']) - pickle_path = os.path.join(tm.get_data_path(), - 'categorical_0_14_1.pickle') + pickle_path = datapath('io', 'data', 'categorical_0_14_1.pickle') # This code was executed once on v0.14.1 to generate the pickle: # # cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'], @@ -275,14 +272,13 @@ def test_pickle_v0_14_1(): tm.assert_categorical_equal(cat, 
pd.read_pickle(pickle_path)) -def test_pickle_v0_15_2(): +def test_pickle_v0_15_2(datapath): # ordered -> _ordered # GH 9347 cat = pd.Categorical(values=['a', 'b', 'c'], ordered=False, categories=['a', 'b', 'c', 'd']) - pickle_path = os.path.join(tm.get_data_path(), - 'categorical_0_15_2.pickle') + pickle_path = datapath('io', 'data', 'categorical_0_15_2.pickle') # This code was executed once on v0.15.2 to generate the pickle: # # cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'], diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index 5ac91c15047ff..3c6b52074763e 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -14,7 +14,7 @@ from pandas import (Series, DataFrame, Panel, MultiIndex, Int64Index, RangeIndex, Categorical, bdate_range, date_range, timedelta_range, Index, DatetimeIndex, - isna, compat, concat, Timestamp) + isna, compat, concat, Timestamp, _np_version_under1p15) import pandas.util.testing as tm import pandas.util._test_decorators as td @@ -1842,6 +1842,12 @@ def make_index(names=None): 'a', 'b'], index=make_index(['date', 'a', 't'])) pytest.raises(ValueError, store.append, 'df', df) + # dup within level + _maybe_remove(store, 'df') + df = DataFrame(np.zeros((12, 2)), columns=['a', 'b'], + index=make_index(['date', 'date', 'date'])) + pytest.raises(ValueError, store.append, 'df', df) + # fully names _maybe_remove(store, 'df') df = DataFrame(np.zeros((12, 2)), columns=[ @@ -2134,6 +2140,10 @@ def test_unimplemented_dtypes_table_columns(self): # this fails because we have a date in the object block...... 
pytest.raises(TypeError, store.append, 'df_unimplemented', df) + @pytest.mark.skipif( + not _np_version_under1p15, + reason=("pytables conda build package needs build " + "with numpy 1.15: gh-22098")) def test_calendar_roundtrip_issue(self): # 8591 @@ -4452,28 +4462,27 @@ def f(): store.select('df') tm.assert_raises_regex(ClosedFileError, 'file is not open', f) - def test_pytables_native_read(self): - + def test_pytables_native_read(self, datapath): with ensure_clean_store( - tm.get_data_path('legacy_hdf/pytables_native.h5'), + datapath('io', 'data', 'legacy_hdf/pytables_native.h5'), mode='r') as store: d2 = store['detector/readout'] assert isinstance(d2, DataFrame) @pytest.mark.skipif(PY35 and is_platform_windows(), reason="native2 read fails oddly on windows / 3.5") - def test_pytables_native2_read(self): + def test_pytables_native2_read(self, datapath): with ensure_clean_store( - tm.get_data_path('legacy_hdf/pytables_native2.h5'), + datapath('io', 'data', 'legacy_hdf', 'pytables_native2.h5'), mode='r') as store: str(store) d1 = store['detector'] assert isinstance(d1, DataFrame) - def test_legacy_table_read(self): + def test_legacy_table_read(self, datapath): # legacy table types with ensure_clean_store( - tm.get_data_path('legacy_hdf/legacy_table.h5'), + datapath('io', 'data', 'legacy_hdf', 'legacy_table.h5'), mode='r') as store: with catch_warnings(record=True): @@ -5120,7 +5129,7 @@ def test_fspath(self): with pd.HDFStore(path) as store: assert os.fspath(store) == str(path) - def test_read_py2_hdf_file_in_py3(self): + def test_read_py2_hdf_file_in_py3(self, datapath): # GH 16781 # tests reading a PeriodIndex DataFrame written in Python2 in Python3 @@ -5135,8 +5144,8 @@ def test_read_py2_hdf_file_in_py3(self): ['2015-01-01', '2015-01-02', '2015-01-05'], freq='B')) with ensure_clean_store( - tm.get_data_path( - 'legacy_hdf/periodindex_0.20.1_x86_64_darwin_2.7.13.h5'), + datapath('io', 'data', 'legacy_hdf', + 'periodindex_0.20.1_x86_64_darwin_2.7.13.h5'), 
mode='r') as store: result = store['p'] assert_frame_equal(result, expected) @@ -5533,14 +5542,14 @@ def test_store_timezone(self): assert_frame_equal(result, df) - def test_legacy_datetimetz_object(self): + def test_legacy_datetimetz_object(self, datapath): # legacy from < 0.17.0 # 8260 expected = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'), B=Timestamp('20130603', tz='CET')), index=range(5)) with ensure_clean_store( - tm.get_data_path('legacy_hdf/datetimetz_object.h5'), + datapath('io', 'data', 'legacy_hdf', 'datetimetz_object.h5'), mode='r') as store: result = store['df'] assert_frame_equal(result, expected) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 4530cc9d2fba9..f8f742c5980ac 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -22,7 +22,6 @@ import pytest import sqlite3 import csv -import os import warnings import numpy as np @@ -184,9 +183,11 @@ class MixInBase(object): def teardown_method(self, method): - for tbl in self._get_all_tables(): - self.drop_table(tbl) - self._close_conn() + # if setup fails, there may not be a connection to close. 
+ if hasattr(self, 'conn'): + for tbl in self._get_all_tables(): + self.drop_table(tbl) + self._close_conn() class MySQLMixIn(MixInBase): @@ -253,9 +254,9 @@ def _get_exec(self): else: return self.conn.cursor() - def _load_iris_data(self): + def _load_iris_data(self, datapath): import io - iris_csv_file = os.path.join(tm.get_data_path(), 'iris.csv') + iris_csv_file = datapath('io', 'data', 'iris.csv') self.drop_table('iris') self._get_exec().execute(SQL_STRINGS['create_iris'][self.flavor]) @@ -503,9 +504,10 @@ class _TestSQLApi(PandasSQLTest): flavor = 'sqlite' mode = None - def setup_method(self, method): + @pytest.fixture(autouse=True) + def setup_method(self, datapath): self.conn = self.connect() - self._load_iris_data() + self._load_iris_data(datapath) self._load_iris_view() self._load_test1_data() self._load_test2_data() @@ -1025,8 +1027,9 @@ class _EngineToConnMixin(object): A mixin that causes setup_connect to create a conn rather than an engine. """ - def setup_method(self, method): - super(_EngineToConnMixin, self).setup_method(method) + @pytest.fixture(autouse=True) + def setup_method(self, datapath): + super(_EngineToConnMixin, self).setup_method(datapath) engine = self.conn conn = engine.connect() self.__tx = conn.begin() @@ -1034,12 +1037,14 @@ def setup_method(self, method): self.__engine = engine self.conn = conn - def teardown_method(self, method): + yield + self.__tx.rollback() self.conn.close() self.conn = self.__engine self.pandasSQL = sql.SQLDatabase(self.__engine) - super(_EngineToConnMixin, self).teardown_method(method) + # XXX: + # super(_EngineToConnMixin, self).teardown_method(method) @pytest.mark.single @@ -1136,7 +1141,7 @@ class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest): """ flavor = None - @classmethod + @pytest.fixture(autouse=True, scope='class') def setup_class(cls): cls.setup_import() cls.setup_driver() @@ -1149,10 +1154,11 @@ def setup_class(cls): msg = "{0} - can't connect to {1} server".format(cls, cls.flavor) 
pytest.skip(msg) - def setup_method(self, method): + @pytest.fixture(autouse=True) + def setup_method(self, datapath): self.setup_connect() - self._load_iris_data() + self._load_iris_data(datapath) self._load_raw_sql() self._load_test1_data() @@ -1665,29 +1671,6 @@ class Temporary(Base): tm.assert_frame_equal(df, expected) - def test_insert_multivalues(self): - # issues addressed - # https://github.com/pandas-dev/pandas/issues/14315 - # https://github.com/pandas-dev/pandas/issues/8953 - - db = sql.SQLDatabase(self.conn) - df = DataFrame({'A': [1, 0, 0], 'B': [1.1, 0.2, 4.3]}) - table = sql.SQLTable("test_table", db, frame=df) - data = [ - {'A': 1, 'B': 0.46}, - {'A': 0, 'B': -2.06} - ] - statement = table.insert_statement(data, conn=self.conn)[0] - - if self.supports_multivalues_insert: - assert statement.parameters == data, ( - 'insert statement should be multivalues' - ) - else: - assert statement.parameters is None, ( - 'insert statement should not be multivalues' - ) - class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy): @@ -1702,7 +1685,6 @@ class _TestSQLiteAlchemy(object): """ flavor = 'sqlite' - supports_multivalues_insert = True @classmethod def connect(cls): @@ -1751,7 +1733,6 @@ class _TestMySQLAlchemy(object): """ flavor = 'mysql' - supports_multivalues_insert = True @classmethod def connect(cls): @@ -1821,7 +1802,6 @@ class _TestPostgreSQLAlchemy(object): """ flavor = 'postgresql' - supports_multivalues_insert = True @classmethod def connect(cls): @@ -1946,11 +1926,12 @@ class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest): def connect(cls): return sqlite3.connect(':memory:') - def setup_method(self, method): + @pytest.fixture(autouse=True) + def setup_method(self, datapath): self.conn = self.connect() self.pandasSQL = sql.SQLiteDatabase(self.conn) - self._load_iris_data() + self._load_iris_data(datapath) self._load_test1_data() @@ -2161,8 +2142,9 @@ def _skip_if_no_pymysql(): @pytest.mark.single class TestXSQLite(SQLiteMixIn): - def 
setup_method(self, method): - self.method = method + @pytest.fixture(autouse=True) + def setup_method(self, request, datapath): + self.method = request.function self.conn = sqlite3.connect(':memory:') def test_basic(self): @@ -2241,8 +2223,7 @@ def test_execute_fail(self): with pytest.raises(Exception): sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn) - @tm.capture_stdout - def test_execute_closed_connection(self): + def test_execute_closed_connection(self, request, datapath): create_sql = """ CREATE TABLE test ( @@ -2262,7 +2243,7 @@ def test_execute_closed_connection(self): tquery("select * from test", con=self.conn) # Initialize connection again (needed for tearDown) - self.setup_method(self.method) + self.setup_method(request, datapath) def test_na_roundtrip(self): pass @@ -2367,7 +2348,7 @@ def clean_up(test_table_to_drop): "if SQLAlchemy is not installed") class TestXMySQL(MySQLMixIn): - @classmethod + @pytest.fixture(autouse=True, scope='class') def setup_class(cls): _skip_if_no_pymysql() @@ -2396,7 +2377,8 @@ def setup_class(cls): "[pandas] in your system's mysql default file, " "typically located at ~/.my.cnf or /etc/.my.cnf. ") - def setup_method(self, method): + @pytest.fixture(autouse=True) + def setup_method(self, request, datapath): _skip_if_no_pymysql() import pymysql try: @@ -2422,7 +2404,7 @@ def setup_method(self, method): "[pandas] in your system's mysql default file, " "typically located at ~/.my.cnf or /etc/.my.cnf. 
") - self.method = method + self.method = request.function def test_basic(self): _skip_if_no_pymysql() @@ -2527,8 +2509,7 @@ def test_execute_fail(self): with pytest.raises(Exception): sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn) - @tm.capture_stdout - def test_execute_closed_connection(self): + def test_execute_closed_connection(self, request, datapath): _skip_if_no_pymysql() drop_sql = "DROP TABLE IF EXISTS test" create_sql = """ @@ -2551,7 +2532,7 @@ def test_execute_closed_connection(self): tquery("select * from test", con=self.conn) # Initialize connection again (needed for tearDown) - self.setup_method(self.method) + self.setup_method(request, datapath) def test_na_roundtrip(self): _skip_if_no_pymysql() diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 110b790a65037..cff63516f4086 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -2,6 +2,8 @@ # pylint: disable=E1101 import datetime as dt +import io +import gzip import os import struct import warnings @@ -23,8 +25,8 @@ @pytest.fixture -def dirpath(): - return tm.get_data_path() +def dirpath(datapath): + return datapath("io", "data") @pytest.fixture @@ -37,8 +39,9 @@ def parsed_114(dirpath): class TestStata(object): - def setup_method(self, method): - self.dirpath = tm.get_data_path() + @pytest.fixture(autouse=True) + def setup_method(self, datapath): + self.dirpath = datapath("io", "data") self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta') self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta') @@ -1473,3 +1476,28 @@ def test_invalid_date_conversion(self): with pytest.raises(ValueError): original.to_stata(path, convert_dates={'wrong_name': 'tc'}) + + @pytest.mark.parametrize('version', [114, 117]) + def test_nonfile_writing(self, version): + # GH 21041 + bio = io.BytesIO() + df = tm.makeDataFrame() + df.index.name = 'index' + with tm.ensure_clean() as path: + df.to_stata(bio, version=version) + bio.seek(0) + 
with open(path, 'wb') as dta: + dta.write(bio.read()) + reread = pd.read_stata(path, index_col='index') + tm.assert_frame_equal(df, reread) + + def test_gzip_writing(self): + # writing version 117 requires seek and cannot be used with gzip + df = tm.makeDataFrame() + df.index.name = 'index' + with tm.ensure_clean() as path: + with gzip.GzipFile(path, 'wb') as gz: + df.to_stata(gz, version=114) + with gzip.GzipFile(path, 'rb') as gz: + reread = pd.read_stata(gz, index_col='index') + tm.assert_frame_equal(df, reread) diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index f65791329f2f1..09687dd97bd43 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -74,11 +74,6 @@ def setup_method(self, method): else: self.default_figsize = (8.0, 6.0) self.default_tick_position = 'left' if self.mpl_ge_2_0_0 else 'default' - # common test data - from pandas import read_csv - base = os.path.join(os.path.dirname(curpath()), os.pardir) - path = os.path.join(base, 'tests', 'data', 'iris.csv') - self.iris = read_csv(path) n = 100 with tm.RNGContext(42): diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py index 47cded19f5300..bb976a1e3e81c 100644 --- a/pandas/tests/plotting/test_converter.py +++ b/pandas/tests/plotting/test_converter.py @@ -1,4 +1,5 @@ import subprocess +import sys import pytest from datetime import datetime, date @@ -27,7 +28,7 @@ def test_register_by_default(self): "import pandas as pd; " "units = dict(matplotlib.units.registry); " "assert pd.Timestamp in units)'") - call = ['python', '-c', code] + call = [sys.executable, '-c', code] assert subprocess.check_call(call) == 0 def test_warns(self): diff --git a/pandas/tests/plotting/test_deprecated.py b/pandas/tests/plotting/test_deprecated.py index 2c2d371921d2f..a45b17ec98261 100644 --- a/pandas/tests/plotting/test_deprecated.py +++ b/pandas/tests/plotting/test_deprecated.py @@ -46,10 +46,9 @@ def 
test_boxplot_deprecated(self): by='indic') @pytest.mark.slow - def test_radviz_deprecated(self): - df = self.iris + def test_radviz_deprecated(self, iris): with tm.assert_produces_warning(FutureWarning): - plotting.radviz(frame=df, class_column='Name') + plotting.radviz(frame=iris, class_column='Name') @pytest.mark.slow def test_plot_params(self): diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index c82c939584dc7..0473610ea2f8f 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -100,11 +100,11 @@ def test_scatter_matrix_axis(self): axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) @pytest.mark.slow - def test_andrews_curves(self): + def test_andrews_curves(self, iris): from pandas.plotting import andrews_curves from matplotlib import cm - df = self.iris + df = iris _check_plot_works(andrews_curves, frame=df, class_column='Name') @@ -165,11 +165,11 @@ def test_andrews_curves(self): andrews_curves(data=df, class_column='Name') @pytest.mark.slow - def test_parallel_coordinates(self): + def test_parallel_coordinates(self, iris): from pandas.plotting import parallel_coordinates from matplotlib import cm - df = self.iris + df = iris ax = _check_plot_works(parallel_coordinates, frame=df, class_column='Name') @@ -234,11 +234,11 @@ def test_parallel_coordinates_with_sorted_labels(self): assert prev[1] < nxt[1] and prev[0] < nxt[0] @pytest.mark.slow - def test_radviz(self): + def test_radviz(self, iris): from pandas.plotting import radviz from matplotlib import cm - df = self.iris + df = iris _check_plot_works(radviz, frame=df, class_column='Name') rgba = ('#556270', '#4ECDC4', '#C7F464') @@ -272,8 +272,8 @@ def test_radviz(self): self._check_colors(handles, facecolors=colors) @pytest.mark.slow - def test_subplot_titles(self): - df = self.iris.drop('Name', axis=1).head() + def test_subplot_titles(self, iris): + df = iris.drop('Name', axis=1).head() # Use the column names as the subplot titles 
title = list(df.columns) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 8e639edd34b18..037bd9cc7cd18 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -1526,6 +1526,27 @@ def test_merge_on_ints_floats_warning(self): result = B.merge(A, left_on='Y', right_on='X') assert_frame_equal(result, expected[['Y', 'X']]) + def test_merge_incompat_infer_boolean_object(self): + # GH21119: bool + object bool merge OK + df1 = DataFrame({'key': Series([True, False], dtype=object)}) + df2 = DataFrame({'key': [True, False]}) + + expected = DataFrame({'key': [True, False]}, dtype=object) + result = pd.merge(df1, df2, on='key') + assert_frame_equal(result, expected) + result = pd.merge(df2, df1, on='key') + assert_frame_equal(result, expected) + + # with missing value + df1 = DataFrame({'key': Series([True, False, np.nan], dtype=object)}) + df2 = DataFrame({'key': [True, False]}) + + expected = DataFrame({'key': [True, False]}, dtype=object) + result = pd.merge(df1, df2, on='key') + assert_frame_equal(result, expected) + result = pd.merge(df2, df1, on='key') + assert_frame_equal(result, expected) + @pytest.mark.parametrize('df1_vals, df2_vals', [ ([0, 1, 2], ["0", "1", "2"]), ([0.0, 1.0, 2.0], ["0", "1", "2"]), @@ -1538,6 +1559,8 @@ def test_merge_on_ints_floats_warning(self): pd.date_range('20130101', periods=3, tz='US/Eastern')), ([0, 1, 2], Series(['a', 'b', 'a']).astype('category')), ([0.0, 1.0, 2.0], Series(['a', 'b', 'a']).astype('category')), + # TODO ([0, 1], pd.Series([False, True], dtype=bool)), + ([0, 1], pd.Series([False, True], dtype=object)) ]) def test_merge_incompat_dtypes(self, df1_vals, df2_vals): # GH 9780, GH 15800 diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index cebbcc41c3e17..59b53cd23010e 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ 
b/pandas/tests/reshape/merge/test_merge_asof.py @@ -1,4 +1,3 @@ -import os import pytest import pytz @@ -13,8 +12,8 @@ class TestAsOfMerge(object): - def read_data(self, name, dedupe=False): - path = os.path.join(tm.get_data_path(), name) + def read_data(self, datapath, name, dedupe=False): + path = datapath('reshape', 'merge', 'data', name) x = read_csv(path) if dedupe: x = (x.drop_duplicates(['time', 'ticker'], keep='last') @@ -23,15 +22,17 @@ def read_data(self, name, dedupe=False): x.time = to_datetime(x.time) return x - def setup_method(self, method): + @pytest.fixture(autouse=True) + def setup_method(self, datapath): - self.trades = self.read_data('trades.csv') - self.quotes = self.read_data('quotes.csv', dedupe=True) - self.asof = self.read_data('asof.csv') - self.tolerance = self.read_data('tolerance.csv') - self.allow_exact_matches = self.read_data('allow_exact_matches.csv') + self.trades = self.read_data(datapath, 'trades.csv') + self.quotes = self.read_data(datapath, 'quotes.csv', dedupe=True) + self.asof = self.read_data(datapath, 'asof.csv') + self.tolerance = self.read_data(datapath, 'tolerance.csv') + self.allow_exact_matches = self.read_data(datapath, + 'allow_exact_matches.csv') self.allow_exact_matches_and_tolerance = self.read_data( - 'allow_exact_matches_and_tolerance.csv') + datapath, 'allow_exact_matches_and_tolerance.csv') def test_examples1(self): """ doc-string examples """ @@ -423,11 +424,11 @@ def test_multiby_indexed(self): pd.merge_asof(left, right, left_index=True, right_index=True, left_by=['k1', 'k2'], right_by=['k1']) - def test_basic2(self): + def test_basic2(self, datapath): - expected = self.read_data('asof2.csv') - trades = self.read_data('trades2.csv') - quotes = self.read_data('quotes2.csv', dedupe=True) + expected = self.read_data(datapath, 'asof2.csv') + trades = self.read_data(datapath, 'trades2.csv') + quotes = self.read_data(datapath, 'quotes2.csv', dedupe=True) result = merge_asof(trades, quotes, on='time', @@ -467,14 
+468,14 @@ def test_valid_join_keys(self): merge_asof(trades, quotes, by='ticker') - def test_with_duplicates(self): + def test_with_duplicates(self, datapath): q = pd.concat([self.quotes, self.quotes]).sort_values( ['time', 'ticker']).reset_index(drop=True) result = merge_asof(self.trades, q, on='time', by='ticker') - expected = self.read_data('asof.csv') + expected = self.read_data(datapath, 'asof.csv') assert_frame_equal(result, expected) def test_with_duplicates_no_on(self): diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index f5e58fa70e1c4..dea305d4b3fee 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -2487,3 +2487,14 @@ def test_concat_aligned_sort_does_not_raise(): columns=[1, 'a']) result = pd.concat([df, df], ignore_index=True, sort=True) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("s1name,s2name", [ + (np.int64(190), (43, 0)), (190, (43, 0))]) +def test_concat_series_name_npscalar_tuple(s1name, s2name): + # GH21015 + s1 = pd.Series({'a': 1, 'b': 2}, name=s1name) + s2 = pd.Series({'c': 5, 'd': 6}, name=s2name) + result = pd.concat([s1, s2]) + expected = pd.Series({'a': 1, 'b': 2, 'c': 5, 'd': 6}) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index d2cf3fc11e165..b71954163f9e1 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from datetime import datetime, date, timedelta @@ -16,6 +17,11 @@ from pandas.api.types import CategoricalDtype as CDT +@pytest.fixture(params=[True, False]) +def dropna(request): + return request.param + + class TestPivotTable(object): def setup_method(self, method): @@ -109,7 +115,6 @@ def test_pivot_table_categorical(self): index=exp_index) tm.assert_frame_equal(result, expected) - @pytest.mark.parametrize('dropna', [True, False]) def 
test_pivot_table_dropna_categoricals(self, dropna): # GH 15193 categories = ['a', 'b', 'c', 'd'] @@ -137,6 +142,25 @@ def test_pivot_table_dropna_categoricals(self, dropna): tm.assert_frame_equal(result, expected) + def test_pivot_with_non_observable_dropna(self, dropna): + # gh-21133 + df = pd.DataFrame( + {'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'], + categories=['low', 'high'], + ordered=True), + 'B': range(5)}) + + result = df.pivot_table(index='A', values='B', dropna=dropna) + expected = pd.DataFrame( + {'B': [2, 3]}, + index=pd.Index( + pd.Categorical.from_codes([0, 1], + categories=['low', 'high'], + ordered=True), + name='A')) + + tm.assert_frame_equal(result, expected) + def test_pass_array(self): result = self.data.pivot_table( 'D', index=self.data.A, columns=self.data.C) @@ -1705,9 +1729,15 @@ def test_crosstab_with_numpy_size(self): tm.assert_frame_equal(result, expected) def test_crosstab_dup_index_names(self): - # GH 13279, GH 18872 + # GH 13279 s = pd.Series(range(3), name='foo') - pytest.raises(ValueError, pd.crosstab, s, s) + + result = pd.crosstab(s, s) + expected_index = pd.Index(range(3), name='foo') + expected = pd.DataFrame(np.eye(3, dtype=np.int64), + index=expected_index, + columns=expected_index) + tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("names", [['a', ('b', 'c')], [('a', 'b'), 'c']]) diff --git a/pandas/tests/reshape/test_tile.py b/pandas/tests/reshape/test_tile.py index 5ea27f9e34e1c..807fb2530603a 100644 --- a/pandas/tests/reshape/test_tile.py +++ b/pandas/tests/reshape/test_tile.py @@ -282,10 +282,10 @@ def test_round_frac(self): result = tmod._round_frac(0.000123456, precision=2) assert result == 0.00012 - def test_qcut_binning_issues(self): + def test_qcut_binning_issues(self, datapath): # #1978, 1979 - path = os.path.join(tm.get_data_path(), 'cut_data.csv') - arr = np.loadtxt(path) + cut_file = datapath(os.path.join('reshape', 'data', 'cut_data.csv')) + arr = np.loadtxt(cut_file) result = 
qcut(arr, 20) diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index 3fdc2aa71bfc0..6472bd4245622 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -106,6 +106,16 @@ def test_compare_timedelta_ndarray(self): class TestTimedeltas(object): + @pytest.mark.parametrize("unit, value, expected", [ + ('us', 9.999, 9999), ('ms', 9.999999, 9999999), + ('s', 9.999999999, 9999999999)]) + def test_rounding_on_int_unit_construction(self, unit, value, expected): + # GH 12690 + result = Timedelta(value, unit=unit) + assert result.value == expected + result = Timedelta(str(value) + unit) + assert result.value == expected + def test_total_seconds_scalar(self): # see gh-10939 rng = Timedelta('1 days, 10:11:12.100123456') @@ -578,3 +588,17 @@ def test_components(self): result = s.dt.components assert not result.iloc[0].isna().all() assert result.iloc[1].isna().all() + + +@pytest.mark.parametrize('value, expected', [ + (Timedelta('10S'), True), + (Timedelta('-10S'), True), + (Timedelta(10, unit='ns'), True), + (Timedelta(0, unit='ns'), False), + (Timedelta(-10, unit='ns'), True), + (Timedelta(None), True), + (pd.NaT, True), +]) +def test_truthiness(value, expected): + # https://github.com/pandas-dev/pandas/issues/21484 + assert bool(value) is expected diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index b022b327de57c..e829506e95b53 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -5,6 +5,7 @@ import dateutil import calendar import locale +import unicodedata import numpy as np from dateutil.tz import tzutc @@ -20,7 +21,7 @@ from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz from pandas.errors import OutOfBoundsDatetime -from pandas.compat import long, PY3 +from pandas.compat import long, PY3, PY2 
from pandas.compat.numpy import np_datetime64_compat from pandas import Timestamp, Period, Timedelta, NaT @@ -116,8 +117,21 @@ def test_names(self, data, time_locale): expected_day = calendar.day_name[0].capitalize() expected_month = calendar.month_name[8].capitalize() - assert data.day_name(time_locale) == expected_day - assert data.month_name(time_locale) == expected_month + result_day = data.day_name(time_locale) + result_month = data.month_name(time_locale) + + # Work around https://github.com/pandas-dev/pandas/issues/22342 + # different normalizations + + if not PY2: + expected_day = unicodedata.normalize("NFD", expected_day) + expected_month = unicodedata.normalize("NFD", expected_month) + + result_day = unicodedata.normalize("NFD", result_day,) + result_month = unicodedata.normalize("NFD", result_month) + + assert result_day == expected_day + assert result_month == expected_month # Test NaT nan_ts = Timestamp(NaT) @@ -528,6 +542,14 @@ def test_disallow_setting_tz(self, tz): with pytest.raises(AttributeError): ts.tz = tz + @pytest.mark.parametrize('offset', ['+0300', '+0200']) + def test_construct_timestamp_near_dst(self, offset): + # GH 20854 + expected = Timestamp('2016-10-30 03:00:00{}'.format(offset), + tz='Europe/Helsinki') + result = Timestamp(expected, tz='Europe/Helsinki') + assert result == expected + class TestTimestamp(object): @@ -621,10 +643,51 @@ def test_basics_nanos(self): assert stamp.microsecond == 145224 assert stamp.nanosecond == 192 - def test_unit(self): - - def check(val, unit=None, h=1, s=1, us=0): - stamp = Timestamp(val, unit=unit) + @pytest.mark.parametrize('value, check_kwargs', [ + [946688461000000000, {}], + [946688461000000000 / long(1000), dict(unit='us')], + [946688461000000000 / long(1000000), dict(unit='ms')], + [946688461000000000 / long(1000000000), dict(unit='s')], + [10957, dict(unit='D', h=0)], + pytest.param((946688461000000000 + 500000) / long(1000000000), + dict(unit='s', us=499, ns=964), + 
marks=pytest.mark.skipif(not PY3, + reason='using truediv, so these' + ' are like floats')), + pytest.param((946688461000000000 + 500000000) / long(1000000000), + dict(unit='s', us=500000), + marks=pytest.mark.skipif(not PY3, + reason='using truediv, so these' + ' are like floats')), + pytest.param((946688461000000000 + 500000) / long(1000000), + dict(unit='ms', us=500), + marks=pytest.mark.skipif(not PY3, + reason='using truediv, so these' + ' are like floats')), + pytest.param((946688461000000000 + 500000) / long(1000000000), + dict(unit='s'), + marks=pytest.mark.skipif(PY3, + reason='get chopped in py2')), + pytest.param((946688461000000000 + 500000000) / long(1000000000), + dict(unit='s'), + marks=pytest.mark.skipif(PY3, + reason='get chopped in py2')), + pytest.param((946688461000000000 + 500000) / long(1000000), + dict(unit='ms'), + marks=pytest.mark.skipif(PY3, + reason='get chopped in py2')), + [(946688461000000000 + 500000) / long(1000), dict(unit='us', us=500)], + [(946688461000000000 + 500000000) / long(1000000), + dict(unit='ms', us=500000)], + [946688461000000000 / 1000.0 + 5, dict(unit='us', us=5)], + [946688461000000000 / 1000.0 + 5000, dict(unit='us', us=5000)], + [946688461000000000 / 1000000.0 + 0.5, dict(unit='ms', us=500)], + [946688461000000000 / 1000000.0 + 0.005, dict(unit='ms', us=5, ns=5)], + [946688461000000000 / 1000000000.0 + 0.5, dict(unit='s', us=500000)], + [10957 + 0.5, dict(unit='D', h=12)]]) + def test_unit(self, value, check_kwargs): + def check(value, unit=None, h=1, s=1, us=0, ns=0): + stamp = Timestamp(value, unit=unit) assert stamp.year == 2000 assert stamp.month == 1 assert stamp.day == 1 @@ -637,41 +700,9 @@ def check(val, unit=None, h=1, s=1, us=0): assert stamp.minute == 0 assert stamp.second == 0 assert stamp.microsecond == 0 - assert stamp.nanosecond == 0 - - ts = Timestamp('20000101 01:01:01') - val = ts.value - days = (ts - Timestamp('1970-01-01')).days - - check(val) - check(val / long(1000), unit='us') - check(val / 
long(1000000), unit='ms') - check(val / long(1000000000), unit='s') - check(days, unit='D', h=0) + assert stamp.nanosecond == ns - # using truediv, so these are like floats - if PY3: - check((val + 500000) / long(1000000000), unit='s', us=500) - check((val + 500000000) / long(1000000000), unit='s', us=500000) - check((val + 500000) / long(1000000), unit='ms', us=500) - - # get chopped in py2 - else: - check((val + 500000) / long(1000000000), unit='s') - check((val + 500000000) / long(1000000000), unit='s') - check((val + 500000) / long(1000000), unit='ms') - - # ok - check((val + 500000) / long(1000), unit='us', us=500) - check((val + 500000000) / long(1000000), unit='ms', us=500000) - - # floats - check(val / 1000.0 + 5, unit='us', us=5) - check(val / 1000.0 + 5000, unit='us', us=5000) - check(val / 1000000.0 + 0.5, unit='ms', us=500) - check(val / 1000000.0 + 0.005, unit='ms', us=5) - check(val / 1000000000.0 + 0.5, unit='s', us=500000) - check(days + 0.5, unit='D', h=12) + check(value, **check_kwargs) def test_roundtrip(self): diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py index aecddab8477fc..dbe31ccb11114 100644 --- a/pandas/tests/scalar/timestamp/test_unary_ops.py +++ b/pandas/tests/scalar/timestamp/test_unary_ops.py @@ -118,6 +118,25 @@ def test_ceil_floor_edge(self, test_input, rounder, freq, expected): expected = Timestamp(expected) assert result == expected + @pytest.mark.parametrize('test_input, freq, expected', [ + ('2018-01-01 00:02:06', '2s', '2018-01-01 00:02:06'), + ('2018-01-01 00:02:00', '2T', '2018-01-01 00:02:00'), + ('2018-01-01 00:04:00', '4T', '2018-01-01 00:04:00'), + ('2018-01-01 00:15:00', '15T', '2018-01-01 00:15:00'), + ('2018-01-01 00:20:00', '20T', '2018-01-01 00:20:00'), + ('2018-01-01 03:00:00', '3H', '2018-01-01 03:00:00'), + ]) + @pytest.mark.parametrize('rounder', ['ceil', 'floor', 'round']) + def test_round_minute_freq(self, test_input, freq, expected, rounder): + # 
Ensure timestamps that shouldnt round dont! + # GH#21262 + + dt = Timestamp(test_input) + expected = Timestamp(expected) + func = getattr(dt, rounder) + result = func(freq) + assert result == expected + def test_ceil(self): dt = Timestamp('20130101 09:10:11') result = dt.ceil('D') @@ -257,7 +276,6 @@ def test_timestamp(self): if PY3: # datetime.timestamp() converts in the local timezone with tm.set_timezone('UTC'): - # should agree with datetime.timestamp method dt = ts.to_pydatetime() assert dt.timestamp() == ts.timestamp() diff --git a/pandas/tests/series/indexing/test_alter_index.py b/pandas/tests/series/indexing/test_alter_index.py index 999ed5f26daee..2fdf198596ce2 100644 --- a/pandas/tests/series/indexing/test_alter_index.py +++ b/pandas/tests/series/indexing/test_alter_index.py @@ -463,54 +463,86 @@ def test_rename(): assert result.name == expected.name -def test_drop(): - # unique - s = Series([1, 2], index=['one', 'two']) - expected = Series([1], index=['one']) - result = s.drop(['two']) - assert_series_equal(result, expected) - result = s.drop('two', axis='rows') - assert_series_equal(result, expected) - - # non-unique - # GH 5248 - s = Series([1, 1, 2], index=['one', 'two', 'one']) - expected = Series([1, 2], index=['one', 'one']) - result = s.drop(['two'], axis=0) - assert_series_equal(result, expected) - result = s.drop('two') - assert_series_equal(result, expected) - - expected = Series([1], index=['two']) - result = s.drop(['one']) - assert_series_equal(result, expected) - result = s.drop('one') - assert_series_equal(result, expected) +@pytest.mark.parametrize( + 'data, index, drop_labels,' + ' axis, expected_data, expected_index', + [ + # Unique Index + ([1, 2], ['one', 'two'], ['two'], + 0, [1], ['one']), + ([1, 2], ['one', 'two'], ['two'], + 'rows', [1], ['one']), + ([1, 1, 2], ['one', 'two', 'one'], ['two'], + 0, [1, 2], ['one', 'one']), + + # GH 5248 Non-Unique Index + ([1, 1, 2], ['one', 'two', 'one'], 'two', + 0, [1, 2], ['one', 'one']), + 
([1, 1, 2], ['one', 'two', 'one'], ['one'], + 0, [1], ['two']), + ([1, 1, 2], ['one', 'two', 'one'], 'one', + 0, [1], ['two'])]) +def test_drop_unique_and_non_unique_index(data, index, axis, drop_labels, + expected_data, expected_index): + + s = Series(data=data, index=index) + result = s.drop(drop_labels, axis=axis) + expected = Series(data=expected_data, index=expected_index) + tm.assert_series_equal(result, expected) - # single string/tuple-like - s = Series(range(3), index=list('abc')) - pytest.raises(KeyError, s.drop, 'bc') - pytest.raises(KeyError, s.drop, ('a',)) +@pytest.mark.parametrize( + 'data, index, drop_labels,' + ' axis, error_type, error_desc', + [ + # single string/tuple-like + (range(3), list('abc'), 'bc', + 0, KeyError, 'not found in axis'), + + # bad axis + (range(3), list('abc'), ('a',), + 0, KeyError, 'not found in axis'), + (range(3), list('abc'), 'one', + 'columns', ValueError, 'No axis named columns')]) +def test_drop_exception_raised(data, index, drop_labels, + axis, error_type, error_desc): + + with tm.assert_raises_regex(error_type, error_desc): + Series(data, index=index).drop(drop_labels, axis=axis) + + +def test_drop_with_ignore_errors(): # errors='ignore' s = Series(range(3), index=list('abc')) result = s.drop('bc', errors='ignore') - assert_series_equal(result, s) + tm.assert_series_equal(result, s) result = s.drop(['a', 'd'], errors='ignore') expected = s.iloc[1:] - assert_series_equal(result, expected) - - # bad axis - pytest.raises(ValueError, s.drop, 'one', axis='columns') + tm.assert_series_equal(result, expected) # GH 8522 s = Series([2, 3], index=[True, False]) assert s.index.is_object() result = s.drop(True) expected = Series([3], index=[False]) - assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) + - # GH 16877 - s = Series([2, 3], index=[0, 1]) - with tm.assert_raises_regex(KeyError, 'not contained in axis'): - s.drop([False, True]) +@pytest.mark.parametrize('index', [[1, 2, 3], [1, 1, 3]]) 
+@pytest.mark.parametrize('drop_labels', [[], [1], [3]]) +def test_drop_empty_list(index, drop_labels): + # GH 21494 + expected_index = [i for i in index if i not in drop_labels] + series = pd.Series(index=index).drop(drop_labels) + tm.assert_series_equal(series, pd.Series(index=expected_index)) + + +@pytest.mark.parametrize('data, index, drop_labels', [ + (None, [1, 2, 3], [1, 4]), + (None, [1, 2, 2], [1, 4]), + ([2, 3], [0, 1], [False, True]) +]) +def test_drop_non_empty_list(data, index, drop_labels): + # GH 21494 and GH 16877 + with tm.assert_raises_regex(KeyError, 'not found in axis'): + pd.Series(data=data, index=index).drop(drop_labels) diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index dce4e82cbdcf1..859082a7e722d 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -188,6 +188,11 @@ def test_reset_index_level(self): with tm.assert_raises_regex(IndexError, 'Too many levels'): s.reset_index(level=[0, 1, 2]) + # Check that .reset_index([],drop=True) doesn't fail + result = pd.Series(range(4)).reset_index([], drop=True) + expected = pd.Series(range(4)) + assert_series_equal(result, expected) + def test_reset_index_range(self): # GH 12071 s = pd.Series(range(2), name='A', dtype='int64') @@ -275,3 +280,18 @@ def test_set_axis_prior_to_deprecation_signature(self): with tm.assert_produces_warning(FutureWarning): result = s.set_axis(0, list('abcd'), inplace=False) tm.assert_series_equal(result, expected) + + def test_reset_index_drop_errors(self): + # GH 20925 + + # KeyError raised for series index when passed level name is missing + s = pd.Series(range(4)) + with tm.assert_raises_regex(KeyError, 'must be same as name'): + s.reset_index('wrong', drop=True) + with tm.assert_raises_regex(KeyError, 'must be same as name'): + s.reset_index('wrong') + + # KeyError raised for series when level to be dropped is missing + s = pd.Series(range(4), 
index=pd.MultiIndex.from_product([[1, 2]] * 2)) + with tm.assert_raises_regex(KeyError, 'not found'): + s.reset_index('wrong', drop=True) diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 6ea40329f4bc3..bcf209521f913 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -1140,11 +1140,15 @@ def test_clip_with_na_args(self): s = Series([1, 2, 3]) assert_series_equal(s.clip(np.nan), Series([1, 2, 3])) - assert_series_equal(s.clip(upper=[1, 1, np.nan]), Series([1, 2, 3])) - assert_series_equal(s.clip(lower=[1, np.nan, 1]), Series([1, 2, 3])) assert_series_equal(s.clip(upper=np.nan, lower=np.nan), Series([1, 2, 3])) + # GH #19992 + assert_series_equal(s.clip(lower=[0, 4, np.nan]), + Series([1, 4, np.nan])) + assert_series_equal(s.clip(upper=[1, np.nan, 1]), + Series([1, np.nan, 1])) + def test_clip_against_series(self): # GH #6966 @@ -1866,6 +1870,15 @@ def s_main_dtypes(): return df +def assert_check_nselect_boundary(vals, dtype, method): + # helper function for 'test_boundary_{dtype}' tests + s = Series(vals, dtype=dtype) + result = getattr(s, method)(3) + expected_idxr = [0, 1, 2] if method == 'nsmallest' else [3, 2, 1] + expected = s.loc[expected_idxr] + tm.assert_series_equal(result, expected) + + class TestNLargestNSmallest(object): @pytest.mark.parametrize( @@ -1950,6 +1963,32 @@ def test_n(self, n): expected = s.sort_values().head(n) assert_series_equal(result, expected) + def test_boundary_integer(self, nselect_method, any_int_dtype): + # GH 21426 + dtype_info = np.iinfo(any_int_dtype) + min_val, max_val = dtype_info.min, dtype_info.max + vals = [min_val, min_val + 1, max_val - 1, max_val] + assert_check_nselect_boundary(vals, any_int_dtype, nselect_method) + + def test_boundary_float(self, nselect_method, float_dtype): + # GH 21426 + dtype_info = np.finfo(float_dtype) + min_val, max_val = dtype_info.min, dtype_info.max + min_2nd, max_2nd = np.nextafter( + [min_val, 
max_val], 0, dtype=float_dtype) + vals = [min_val, min_2nd, max_2nd, max_val] + assert_check_nselect_boundary(vals, float_dtype, nselect_method) + + @pytest.mark.parametrize('dtype', ['datetime64[ns]', 'timedelta64[ns]']) + def test_boundary_datetimelike(self, nselect_method, dtype): + # GH 21426 + # use int64 bounds and +1 to min_val since true minimum is NaT + # (include min_val/NaT at end to maintain same expected_idxr) + dtype_info = np.iinfo('int64') + min_val, max_val = dtype_info.min, dtype_info.max + vals = [min_val + 1, min_val + 2, max_val - 1, max_val, min_val] + assert_check_nselect_boundary(vals, dtype, nselect_method) + class TestCategoricalSeriesAnalytics(object): diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index ec0d7296e540e..95836f046195a 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -88,6 +88,46 @@ def test_ser_cmp_result_names(self, names, op): class TestTimestampSeriesComparison(object): + def test_dt64_ser_cmp_date_warning(self): + # https://github.com/pandas-dev/pandas/issues/21359 + # Remove this test and enble invalid test below + ser = pd.Series(pd.date_range('20010101', periods=10), name='dates') + date = ser.iloc[0].to_pydatetime().date() + + with tm.assert_produces_warning(FutureWarning) as m: + result = ser == date + expected = pd.Series([True] + [False] * 9, name='dates') + tm.assert_series_equal(result, expected) + assert "Comparing Series of datetimes " in str(m[0].message) + assert "will not compare equal" in str(m[0].message) + + with tm.assert_produces_warning(FutureWarning) as m: + result = ser != date + tm.assert_series_equal(result, ~expected) + assert "will not compare equal" in str(m[0].message) + + with tm.assert_produces_warning(FutureWarning) as m: + result = ser <= date + tm.assert_series_equal(result, expected) + assert "a TypeError will be raised" in str(m[0].message) + + with tm.assert_produces_warning(FutureWarning) 
as m: + result = ser < date + tm.assert_series_equal(result, pd.Series([False] * 10, name='dates')) + assert "a TypeError will be raised" in str(m[0].message) + + with tm.assert_produces_warning(FutureWarning) as m: + result = ser >= date + tm.assert_series_equal(result, pd.Series([True] * 10, name='dates')) + assert "a TypeError will be raised" in str(m[0].message) + + with tm.assert_produces_warning(FutureWarning) as m: + result = ser > date + tm.assert_series_equal(result, pd.Series([False] + [True] * 9, + name='dates')) + assert "a TypeError will be raised" in str(m[0].message) + + @pytest.mark.skip(reason="GH-21359") def test_dt64ser_cmp_date_invalid(self): # GH#19800 datetime.date comparison raises to # match DatetimeIndex/Timestamp. This also matches the behavior diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 7e59325c32ddc..906d2aacd5586 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -137,6 +137,17 @@ def test_constructor_no_data_index_order(self): result = pd.Series(index=['b', 'a', 'c']) assert result.index.tolist() == ['b', 'a', 'c'] + def test_constructor_dtype_str_na_values(self, string_dtype): + # https://github.com/pandas-dev/pandas/issues/21083 + ser = Series(['x', None], dtype=string_dtype) + result = ser.isna() + expected = Series([False, True]) + tm.assert_series_equal(result, expected) + assert ser.iloc[1] is None + + ser = Series(['x', np.nan], dtype=string_dtype) + assert np.isnan(ser.iloc[1]) + def test_constructor_series(self): index1 = ['d', 'b', 'a', 'c'] index2 = sorted(index1) @@ -164,22 +175,25 @@ def test_constructor_list_like(self): @pytest.mark.parametrize('input_vals', [ ([1, 2]), - ([1.0, 2.0, np.nan]), (['1', '2']), (list(pd.date_range('1/1/2011', periods=2, freq='H'))), (list(pd.date_range('1/1/2011', periods=2, freq='H', tz='US/Eastern'))), ([pd.Interval(left=0, right=5)]), ]) - def test_constructor_list_str(self, 
input_vals): + def test_constructor_list_str(self, input_vals, string_dtype): # GH 16605 # Ensure that data elements from a list are converted to strings # when dtype is str, 'str', or 'U' + result = Series(input_vals, dtype=string_dtype) + expected = Series(input_vals).astype(string_dtype) + assert_series_equal(result, expected) - for dtype in ['str', str, 'U']: - result = Series(input_vals, dtype=dtype) - expected = Series(input_vals).astype(dtype) - assert_series_equal(result, expected) + def test_constructor_list_str_na(self, string_dtype): + result = Series([1.0, 2.0, np.nan], dtype=string_dtype) + expected = Series(['1.0', '2.0', np.nan], dtype=object) + assert_series_equal(result, expected) + assert np.isnan(result[2]) def test_constructor_generator(self): gen = (i for i in range(10)) diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py index 47798d0ddd7f5..5e924ac5c8894 100644 --- a/pandas/tests/series/test_datetime_values.py +++ b/pandas/tests/series/test_datetime_values.py @@ -3,6 +3,7 @@ import locale import calendar +import unicodedata import pytest from datetime import datetime, date @@ -13,7 +14,8 @@ from pandas.core.dtypes.common import is_integer_dtype, is_list_like from pandas import (Index, Series, DataFrame, bdate_range, date_range, period_range, timedelta_range, - PeriodIndex, DatetimeIndex, TimedeltaIndex) + PeriodIndex, DatetimeIndex, TimedeltaIndex, + compat) import pandas.core.common as com from pandas.util.testing import assert_series_equal @@ -309,10 +311,24 @@ def test_dt_accessor_datetime_name_accessors(self, time_locale): s = Series(DatetimeIndex(freq='M', start='2012', end='2013')) result = s.dt.month_name(locale=time_locale) expected = Series([month.capitalize() for month in expected_months]) + + # work around https://github.com/pandas-dev/pandas/issues/22342 + if not compat.PY2: + result = result.str.normalize("NFD") + expected = expected.str.normalize("NFD") + 
tm.assert_series_equal(result, expected) + for s_date, expected in zip(s, expected_months): result = s_date.month_name(locale=time_locale) - assert result == expected.capitalize() + expected = expected.capitalize() + + if not compat.PY2: + result = unicodedata.normalize("NFD", result) + expected = unicodedata.normalize("NFD", expected) + + assert result == expected + s = s.append(Series([pd.NaT])) assert np.isnan(s.dt.month_name(locale=time_locale).iloc[-1]) diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py index 0b0d4334c86a3..90f37053ce17e 100644 --- a/pandas/tests/series/test_io.py +++ b/pandas/tests/series/test_io.py @@ -11,6 +11,7 @@ from pandas import Series, DataFrame from pandas.compat import StringIO, u +from pandas.io.common import _get_handle from pandas.util.testing import (assert_series_equal, assert_almost_equal, assert_frame_equal, ensure_clean) import pandas.util.testing as tm @@ -138,29 +139,44 @@ def test_to_csv_path_is_none(self): csv_str = s.to_csv(path=None) assert isinstance(csv_str, str) - def test_to_csv_compression(self, compression): - - s = Series([0.123456, 0.234567, 0.567567], index=['A', 'B', 'C'], - name='X') + @pytest.mark.parametrize('s,encoding', [ + (Series([0.123456, 0.234567, 0.567567], index=['A', 'B', 'C'], + name='X'), None), + # GH 21241, 21118 + (Series(['abc', 'def', 'ghi'], name='X'), 'ascii'), + (Series(["123", u"你好", u"世界"], name=u"中文"), 'gb2312'), + (Series(["123", u"Γειά σου", u"Κόσμε"], name=u"Ελληνικά"), 'cp737') + ]) + def test_to_csv_compression(self, s, encoding, compression): with ensure_clean() as filename: - s.to_csv(filename, compression=compression, header=True) - + s.to_csv(filename, compression=compression, encoding=encoding, + header=True) # test the round trip - to_csv -> read_csv - rs = pd.read_csv(filename, compression=compression, - index_col=0, squeeze=True) - assert_series_equal(s, rs) + result = pd.read_csv(filename, compression=compression, + encoding=encoding, 
index_col=0, squeeze=True) + assert_series_equal(s, result) + + # test the round trip using file handle - to_csv -> read_csv + f, _handles = _get_handle(filename, 'w', compression=compression, + encoding=encoding) + with f: + s.to_csv(f, encoding=encoding, header=True) + result = pd.read_csv(filename, compression=compression, + encoding=encoding, index_col=0, squeeze=True) + assert_series_equal(s, result) # explicitly ensure file was compressed with tm.decompress_file(filename, compression) as fh: - text = fh.read().decode('utf8') + text = fh.read().decode(encoding or 'utf8') assert s.name in text with tm.decompress_file(filename, compression) as fh: assert_series_equal(s, pd.read_csv(fh, index_col=0, - squeeze=True)) + squeeze=True, + encoding=encoding)) class TestSeriesIO(TestData): diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py index 97236f028b1c4..730c2b7865f1f 100644 --- a/pandas/tests/series/test_repr.py +++ b/pandas/tests/series/test_repr.py @@ -11,6 +11,7 @@ from pandas import (Index, Series, DataFrame, date_range, option_context, Categorical, period_range, timedelta_range) from pandas.core.index import MultiIndex +from pandas.core.base import StringMixin from pandas.compat import lrange, range, u from pandas import compat @@ -202,6 +203,35 @@ def test_latex_repr(self): class TestCategoricalRepr(object): + def test_categorical_repr_unicode(self): + # GH#21002 if len(index) > 60, sys.getdefaultencoding()=='ascii', + # and we are working in PY2, then rendering a Categorical could raise + # UnicodeDecodeError by trying to decode when it shouldn't + + class County(StringMixin): + name = u'San Sebastián' + state = u'PR' + + def __unicode__(self): + return self.name + u', ' + self.state + + cat = pd.Categorical([County() for n in range(61)]) + idx = pd.Index(cat) + ser = idx.to_series() + + if compat.PY3: + # no reloading of sys, just check that the default (utf8) works + # as expected + repr(ser) + str(ser) + + else: + # set 
sys.defaultencoding to ascii, then change it back after + # the test + with tm.set_defaultencoding('ascii'): + repr(ser) + str(ser) + def test_categorical_repr(self): a = Series(Categorical([1, 2, 3, 4])) exp = u("0 1\n1 2\n2 3\n3 4\n" + diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/test_sorting.py index 01b4ea6eaa238..13e0d1b12c372 100644 --- a/pandas/tests/series/test_sorting.py +++ b/pandas/tests/series/test_sorting.py @@ -141,19 +141,20 @@ def test_sort_index_inplace(self): assert result is None tm.assert_series_equal(random_order, self.ts) - def test_sort_index_multiindex(self): + @pytest.mark.parametrize("level", ['A', 0]) # GH 21052 + def test_sort_index_multiindex(self, level): mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC')) s = Series([1, 2], mi) backwards = s.iloc[[1, 0]] # implicit sort_remaining=True - res = s.sort_index(level='A') + res = s.sort_index(level=level) assert_series_equal(backwards, res) # GH13496 - # rows share same level='A': sort has no effect without remaining lvls - res = s.sort_index(level='A', sort_remaining=False) + # sort has no effect without remaining lvls + res = s.sort_index(level=level, sort_remaining=False) assert_series_equal(s, res) def test_sort_index_kind(self): diff --git a/pandas/tests/sparse/test_array.py b/pandas/tests/sparse/test_array.py index 6c0c83cf65ff7..b3330f866ba1f 100644 --- a/pandas/tests/sparse/test_array.py +++ b/pandas/tests/sparse/test_array.py @@ -454,6 +454,17 @@ def test_values_asarray(self): assert_almost_equal(self.arr.to_dense(), self.arr_data) assert_almost_equal(self.arr.sp_values, np.asarray(self.arr)) + @pytest.mark.parametrize('data,shape,dtype', [ + ([0, 0, 0, 0, 0], (5,), None), + ([], (0,), None), + ([0], (1,), None), + (['A', 'A', np.nan, 'B'], (4,), np.object) + ]) + def test_shape(self, data, shape, dtype): + # GH 21126 + out = SparseArray(data, dtype=dtype) + assert out.shape == shape + def test_to_dense(self): vals = np.array([1, np.nan, 
np.nan, 3, np.nan]) res = SparseArray(vals).to_dense() diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 0b329f64dafa3..576239e49455e 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -1,15 +1,17 @@ # -*- coding: utf-8 -*- import pytest +import os import collections from functools import partial import numpy as np -from pandas import Series, Timestamp +from pandas import Series, DataFrame, Timestamp from pandas.compat import range, lmap import pandas.core.common as com from pandas.core import ops +from pandas.io.common import _get_handle import pandas.util.testing as tm @@ -222,3 +224,59 @@ def test_standardize_mapping(): dd = collections.defaultdict(list) assert isinstance(com.standardize_mapping(dd), partial) + + +@pytest.mark.parametrize('obj', [ + DataFrame(100 * [[0.123456, 0.234567, 0.567567], + [12.32112, 123123.2, 321321.2]], + columns=['X', 'Y', 'Z']), + Series(100 * [0.123456, 0.234567, 0.567567], name='X')]) +@pytest.mark.parametrize('method', ['to_pickle', 'to_json', 'to_csv']) +def test_compression_size(obj, method, compression): + if not compression: + pytest.skip("only test compression case.") + + with tm.ensure_clean() as filename: + getattr(obj, method)(filename, compression=compression) + compressed = os.path.getsize(filename) + getattr(obj, method)(filename, compression=None) + uncompressed = os.path.getsize(filename) + assert uncompressed > compressed + + +@pytest.mark.parametrize('obj', [ + DataFrame(100 * [[0.123456, 0.234567, 0.567567], + [12.32112, 123123.2, 321321.2]], + columns=['X', 'Y', 'Z']), + Series(100 * [0.123456, 0.234567, 0.567567], name='X')]) +@pytest.mark.parametrize('method', ['to_csv', 'to_json']) +def test_compression_size_fh(obj, method, compression_only): + + with tm.ensure_clean() as filename: + f, _handles = _get_handle(filename, 'w', compression=compression_only) + with f: + getattr(obj, method)(f) + assert not f.closed + assert f.closed + compressed = 
os.path.getsize(filename) + with tm.ensure_clean() as filename: + f, _handles = _get_handle(filename, 'w', compression=None) + with f: + getattr(obj, method)(f) + assert not f.closed + assert f.closed + uncompressed = os.path.getsize(filename) + assert uncompressed > compressed + + +# GH 21227 +def test_compression_warning(compression_only): + df = DataFrame(100 * [[0.123456, 0.234567, 0.567567], + [12.32112, 123123.2, 321321.2]], + columns=['X', 'Y', 'Z']) + with tm.ensure_clean() as filename: + f, _handles = _get_handle(filename, 'w', compression=compression_only) + with tm.assert_produces_warning(RuntimeWarning, + check_stacklevel=False): + with f: + df.to_csv(f, compression=compression_only) diff --git a/pandas/tests/test_compat.py b/pandas/tests/test_compat.py index ead9ba1e26e2d..79d3aad493182 100644 --- a/pandas/tests/test_compat.py +++ b/pandas/tests/test_compat.py @@ -4,9 +4,10 @@ """ import pytest +import re from pandas.compat import (range, zip, map, filter, lrange, lzip, lmap, lfilter, builtins, iterkeys, itervalues, iteritems, - next, get_range_parameters, PY2) + next, get_range_parameters, PY2, re_type) class TestBuiltinIterators(object): @@ -89,3 +90,7 @@ def test_get_range_parameters(self, start, stop, step): assert start_result == start_expected assert stop_result == stop_expected assert step_result == step_expected + + +def test_re_type(): + assert isinstance(re.compile(''), re_type) diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index a595d9f18d6b8..cf98cff97669a 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -2,6 +2,9 @@ """ Testing that we work in the downstream packages """ +import subprocess +import sys + import pytest import numpy as np # noqa from pandas import DataFrame @@ -53,6 +56,11 @@ def test_xarray(df): assert df.to_xarray() is not None +def test_oo_optimizable(): + # GH 21071 + subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"]) + + @tm.network 
def test_statsmodels(): @@ -87,6 +95,7 @@ def test_pandas_gbq(df): pandas_gbq = import_module('pandas_gbq') # noqa +@pytest.mark.xfail(reason="0.7.0 pending") @tm.network def test_pandas_datareader(): diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 7973b27601237..128ab0572ba55 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -2717,3 +2717,10 @@ def test_panel_index(): np.repeat([1, 2, 3], 4)], names=['time', 'panel']) tm.assert_index_equal(index, expected) + + +def test_panel_np_all(): + with catch_warnings(record=True): + wp = Panel({"A": DataFrame({'b': [1, 2]})}) + result = np.all(wp) + assert result == np.bool_(True) diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index c1257cce9a9a4..bcc50a25623a1 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -2870,6 +2870,16 @@ def test_asfreq_bug(self): freq='1T')) assert_frame_equal(result, expected) + def test_resample_with_nat(self): + # GH 13223 + index = pd.to_timedelta(['0s', pd.NaT, '2s']) + result = DataFrame({'value': [2, 3, 5]}, index).resample('1s').mean() + expected = DataFrame({'value': [2.5, np.nan, 5.0]}, + index=timedelta_range('0 day', + periods=3, + freq='1S')) + assert_frame_equal(result, expected) + class TestResamplerGrouper(object): diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index d8e90ae0e1b35..cfd88f41f855e 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -41,7 +41,7 @@ def win_types(request): return request.param -@pytest.fixture(params=['kaiser', 'gaussian', 'general_gaussian', 'slepian']) +@pytest.fixture(params=['kaiser', 'gaussian', 'general_gaussian']) def win_types_special(request): return request.param @@ -389,8 +389,8 @@ def test_constructor(self, which): c(window=2, min_periods=1, center=False) # GH 13383 - c(0) with pytest.raises(ValueError): + c(0) c(-1) # not valid @@ -409,7 +409,6 @@ def 
test_constructor_with_win_type(self, which): # GH 13383 o = getattr(self, which) c = o.rolling - c(0, win_type='boxcar') with pytest.raises(ValueError): c(-1, win_type='boxcar') @@ -1079,8 +1078,7 @@ def test_cmov_window_special(self, win_types_special): kwds = { 'kaiser': {'beta': 1.}, 'gaussian': {'std': 1.}, - 'general_gaussian': {'power': 2., 'width': 2.}, - 'slepian': {'width': 0.5}} + 'general_gaussian': {'power': 2., 'width': 2.}} vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]) @@ -1090,8 +1088,6 @@ def test_cmov_window_special(self, win_types_special): 13.65671, 12.01002, np.nan, np.nan], 'general_gaussian': [np.nan, np.nan, 9.85011, 10.71589, 11.73161, 13.08516, 12.95111, 12.74577, np.nan, np.nan], - 'slepian': [np.nan, np.nan, 9.81073, 10.89359, 11.70284, 12.88331, - 12.96079, 12.77008, np.nan, np.nan], 'kaiser': [np.nan, np.nan, 9.86851, 11.02969, 11.65161, 12.75129, 12.90702, 12.83757, np.nan, np.nan] } diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 5369b1a94a956..00701ca2be946 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -1,4 +1,3 @@ -import os from distutils.version import LooseVersion from datetime import date, datetime, timedelta @@ -455,14 +454,15 @@ def test_add(self, offset_types, tz): assert isinstance(result, Timestamp) assert result == expected_localize - def test_pickle_v0_15_2(self): + def test_pickle_v0_15_2(self, datapath): offsets = {'DateOffset': DateOffset(years=1), 'MonthBegin': MonthBegin(1), 'Day': Day(1), 'YearBegin': YearBegin(1), 'Week': Week(1)} - pickle_path = os.path.join(tm.get_data_path(), - 'dateoffset_0_15_2.pickle') + + pickle_path = datapath('tseries', 'offsets', 'data', + 'dateoffset_0_15_2.pickle') # This code was executed once on v0.15.2 to generate the pickle: # with open(pickle_path, 'wb') as f: pickle.dump(offsets, f) # @@ -528,7 +528,10 @@ def 
test_repr(self): assert repr(self.offset) == '<BusinessDay>' assert repr(self.offset2) == '<2 * BusinessDays>' - expected = '<BusinessDay: offset=datetime.timedelta(1)>' + if compat.PY37: + expected = '<BusinessDay: offset=datetime.timedelta(days=1)>' + else: + expected = '<BusinessDay: offset=datetime.timedelta(1)>' assert repr(self.offset + timedelta(1)) == expected def test_with_offset(self): @@ -1642,7 +1645,10 @@ def test_repr(self): assert repr(self.offset) == '<CustomBusinessDay>' assert repr(self.offset2) == '<2 * CustomBusinessDays>' - expected = '<BusinessDay: offset=datetime.timedelta(1)>' + if compat.PY37: + expected = '<BusinessDay: offset=datetime.timedelta(days=1)>' + else: + expected = '<BusinessDay: offset=datetime.timedelta(1)>' assert repr(self.offset + timedelta(1)) == expected def test_with_offset(self): @@ -1848,12 +1854,10 @@ def _check_roundtrip(obj): _check_roundtrip(self.offset2) _check_roundtrip(self.offset * 2) - def test_pickle_compat_0_14_1(self): + def test_pickle_compat_0_14_1(self, datapath): hdays = [datetime(2013, 1, 1) for ele in range(4)] - - pth = tm.get_data_path() - - cday0_14_1 = read_pickle(os.path.join(pth, 'cday-0.14.1.pickle')) + pth = datapath('tseries', 'offsets', 'data', 'cday-0.14.1.pickle') + cday0_14_1 = read_pickle(pth) cday = CDay(holidays=hdays) assert cday == cday0_14_1 diff --git a/pandas/tests/util/test_testing.py b/pandas/tests/util/test_testing.py index d6f58d16bcf64..4d34987e14f75 100644 --- a/pandas/tests/util/test_testing.py +++ b/pandas/tests/util/test_testing.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import os import pandas as pd import pytest import numpy as np @@ -503,6 +504,25 @@ def test_index_equal_metadata_message(self): with tm.assert_raises_regex(AssertionError, expected): assert_index_equal(idx1, idx2) + def test_categorical_index_equality(self): + expected = """Index are different + +Attribute "dtype" are different +\\[left\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b'\\], 
ordered=False\\) +\\[right\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b', u?'c'\\], \ +ordered=False\\)""" + + with tm.assert_raises_regex(AssertionError, expected): + assert_index_equal(pd.Index(pd.Categorical(['a', 'b'])), + pd.Index(pd.Categorical(['a', 'b'], + categories=['a', 'b', 'c']))) + + def test_categorical_index_equality_relax_categories_check(self): + assert_index_equal(pd.Index(pd.Categorical(['a', 'b'])), + pd.Index(pd.Categorical(['a', 'b'], + categories=['a', 'b', 'c'])), + check_categorical=False) + class TestAssertSeriesEqual(object): @@ -600,6 +620,25 @@ def test_series_equal_message(self): assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 4]), check_less_precise=True) + def test_categorical_series_equality(self): + expected = """Attributes are different + +Attribute "dtype" are different +\\[left\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b'\\], ordered=False\\) +\\[right\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b', u?'c'\\], \ +ordered=False\\)""" + + with tm.assert_raises_regex(AssertionError, expected): + assert_series_equal(pd.Series(pd.Categorical(['a', 'b'])), + pd.Series(pd.Categorical(['a', 'b'], + categories=['a', 'b', 'c']))) + + def test_categorical_series_equality_relax_categories_check(self): + assert_series_equal(pd.Series(pd.Categorical(['a', 'b'])), + pd.Series(pd.Categorical(['a', 'b'], + categories=['a', 'b', 'c'])), + check_categorical=False) + class TestAssertFrameEqual(object): @@ -803,3 +842,15 @@ def test_locale(self): # GH9744 locales = tm.get_locales() assert len(locales) >= 1 + + +def test_datapath_missing(datapath, request): + if not request.config.getoption("--strict-data-files"): + pytest.skip("Need to set '--strict-data-files'") + + with pytest.raises(ValueError): + datapath('not_a_file') + + result = datapath('data', 'iris.csv') + expected = os.path.join('pandas', 'tests', 'data', 'iris.csv') + assert result == expected diff --git a/pandas/tests/util/test_util.py 
b/pandas/tests/util/test_util.py index 145be7f85b193..c049dfc874940 100644 --- a/pandas/tests/util/test_util.py +++ b/pandas/tests/util/test_util.py @@ -433,6 +433,26 @@ def teardown_class(cls): del cls.locales del cls.current_locale + def test_can_set_locale_valid_set(self): + # Setting the default locale should return True + assert tm.can_set_locale('') is True + + def test_can_set_locale_invalid_set(self): + # Setting an invalid locale should return False + assert tm.can_set_locale('non-existent_locale') is False + + def test_can_set_locale_invalid_get(self, monkeypatch): + # In some cases, an invalid locale can be set, + # but a subsequent getlocale() raises a ValueError + # See GH 22129 + + def mockgetlocale(): + raise ValueError() + + with monkeypatch.context() as m: + m.setattr(locale, 'getlocale', mockgetlocale) + assert tm.can_set_locale('') is False + def test_get_locales(self): # all systems should have at least a single locale assert len(tm.get_locales()) > 0 @@ -466,7 +486,7 @@ def test_set_locale(self): enc = codecs.lookup(enc).name new_locale = lang, enc - if not tm._can_set_locale(new_locale): + if not tm.can_set_locale(new_locale): with pytest.raises(locale.Error): with tm.set_locale(new_locale): pass diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 749165f894819..c294110d89ec5 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -1090,12 +1090,17 @@ def apply(self, other): class CustomBusinessMonthEnd(_CustomBusinessMonth): - __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'end') + # TODO(py27): Replace condition with Subsitution after dropping Py27 + if _CustomBusinessMonth.__doc__: + __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'end') _prefix = 'CBM' class CustomBusinessMonthBegin(_CustomBusinessMonth): - __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'beginning') + # TODO(py27): Replace condition with Subsitution after dropping Py27 + if 
_CustomBusinessMonth.__doc__: + __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', + 'beginning') _prefix = 'CBMS' diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index 624fbbbd4f05e..6b55554cdc941 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -4,7 +4,7 @@ import types import warnings from textwrap import dedent, wrap -from functools import wraps, update_wrapper +from functools import wraps, update_wrapper, WRAPPER_ASSIGNMENTS def deprecate(name, alternative, version, alt_name=None, @@ -20,18 +20,18 @@ def deprecate(name, alternative, version, alt_name=None, Parameters ---------- name : str - Name of function to deprecate - alternative : str - Name of function to use instead + Name of function to deprecate. + alternative : func + Function to use instead. version : str - Version of pandas in which the method has been deprecated + Version of pandas in which the method has been deprecated. alt_name : str, optional - Name to use in preference of alternative.__name__ + Name to use in preference of alternative.__name__. klass : Warning, default FutureWarning stacklevel : int, default 2 msg : str - The message to display in the warning. - Default is '{name} is deprecated. Use {alt_name} instead.' + The message to display in the warning. + Default is '{name} is deprecated. Use {alt_name} instead.' """ alt_name = alt_name or alternative.__name__ @@ -39,25 +39,26 @@ def deprecate(name, alternative, version, alt_name=None, warning_msg = msg or '{} is deprecated, use {} instead'.format(name, alt_name) - @wraps(alternative) + # adding deprecated directive to the docstring + msg = msg or 'Use `{alt_name}` instead.'.format(alt_name=alt_name) + msg = '\n '.join(wrap(msg, 70)) + + @Substitution(version=version, msg=msg) + @Appender(alternative.__doc__) def wrapper(*args, **kwargs): + """ + .. 
deprecated:: %(version)s + + %(msg)s + + """ warnings.warn(warning_msg, klass, stacklevel=stacklevel) return alternative(*args, **kwargs) - # adding deprecated directive to the docstring - msg = msg or 'Use `{alt_name}` instead.'.format(alt_name=alt_name) - tpl = dedent(""" - .. deprecated:: {version} - - {msg} - - {rest} - """) - rest = getattr(wrapper, '__doc__', '') - docstring = tpl.format(version=version, - msg='\n '.join(wrap(msg, 70)), - rest=dedent(rest)) - wrapper.__doc__ = docstring + # Since we are using Substitution to create the required docstring, + # remove that from the attributes that should be assigned to the wrapper + assignments = tuple(x for x in WRAPPER_ASSIGNMENTS if x != '__doc__') + update_wrapper(wrapper, alternative, assigned=assignments) return wrapper diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index 89d90258f58e0..c6ab24403d58d 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -23,13 +23,13 @@ def test_foo(): For more information, refer to the ``pytest`` documentation on ``skipif``. 
""" - import pytest import locale from distutils.version import LooseVersion from pandas.compat import (is_platform_windows, is_platform_32bit, PY3, import_lzma) +from pandas.compat.numpy import _np_version_under1p15 from pandas.core.computation.expressions import (_USE_NUMEXPR, _NUMEXPR_INSTALLED) @@ -160,6 +160,9 @@ def decorated_func(func): skip_if_no_mpl = pytest.mark.skipif(_skip_if_no_mpl(), reason="Missing matplotlib dependency") + +skip_if_np_lt_115 = pytest.mark.skipif(_np_version_under1p15, + reason="NumPy 1.15 or greater required") skip_if_mpl = pytest.mark.skipif(not _skip_if_no_mpl(), reason="matplotlib is present") skip_if_mpl_1_5 = pytest.mark.skipif(_skip_if_mpl_1_5(), diff --git a/pandas/util/testing.py b/pandas/util/testing.py index e1484a9c1b390..bb79c25126fab 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -6,7 +6,6 @@ import sys import tempfile import warnings -import inspect import os import subprocess import locale @@ -479,6 +478,8 @@ def set_locale(new_locale, lc_var=locale.LC_ALL): A string of the form <language_country>.<encoding>. For example to set the current locale to US English with a UTF8 encoding, you would pass "en_US.UTF-8". + lc_var : int, default `locale.LC_ALL` + The category of the locale being set. Notes ----- @@ -490,37 +491,37 @@ def set_locale(new_locale, lc_var=locale.LC_ALL): try: locale.setlocale(lc_var, new_locale) - - try: - normalized_locale = locale.getlocale() - except ValueError: - yield new_locale + normalized_locale = locale.getlocale() + if com._all_not_none(*normalized_locale): + yield '.'.join(normalized_locale) else: - if com._all_not_none(*normalized_locale): - yield '.'.join(normalized_locale) - else: - yield new_locale + yield new_locale finally: locale.setlocale(lc_var, current_locale) -def _can_set_locale(lc): - """Check to see if we can set a locale without throwing an exception. 
+def can_set_locale(lc, lc_var=locale.LC_ALL): + """ + Check to see if we can set a locale, and subsequently get the locale, + without raising an Exception. Parameters ---------- lc : str The locale to attempt to set. + lc_var : int, default `locale.LC_ALL` + The category of the locale being set. Returns ------- - isvalid : bool + is_valid : bool Whether the passed locale can be set """ try: - with set_locale(lc): + with set_locale(lc, lc_var=lc_var): pass - except locale.Error: # horrible name for a Exception subclass + except (ValueError, + locale.Error): # horrible name for a Exception subclass return False else: return True @@ -547,12 +548,34 @@ def _valid_locales(locales, normalize): else: normalizer = lambda x: x.strip() - return list(filter(_can_set_locale, map(normalizer, locales))) + return list(filter(can_set_locale, map(normalizer, locales))) # ----------------------------------------------------------------------------- # Stdout / stderr decorators +@contextmanager +def set_defaultencoding(encoding): + """ + Set default encoding (as given by sys.getdefaultencoding()) to the given + encoding; restore on exit. + + Parameters + ---------- + encoding : str + """ + if not PY2: + raise ValueError("set_defaultencoding context is only available " + "in Python 2.") + orig = sys.getdefaultencoding() + reload(sys) # noqa:F821 + sys.setdefaultencoding(encoding) + try: + yield + finally: + sys.setdefaultencoding(orig) + + def capture_stdout(f): """ Decorator to capture stdout in a buffer so that it can be checked @@ -729,15 +752,6 @@ def ensure_clean(filename=None, return_filelike=False): print("Exception on removing file: {error}".format(error=e)) -def get_data_path(f=''): - """Return the path of a data file, these are relative to the current test - directory. 
- """ - # get our callers file - _, filename, _, _, _, _ = inspect.getouterframes(inspect.currentframe())[1] - base_dir = os.path.abspath(os.path.dirname(filename)) - return os.path.join(base_dir, 'data', f) - # ----------------------------------------------------------------------------- # Comparators @@ -778,8 +792,12 @@ def assert_index_equal(left, right, exact='equiv', check_names=True, def _check_types(l, r, obj='Index'): if exact: - assert_class_equal(left, right, exact=exact, obj=obj) - assert_attr_equal('dtype', l, r, obj=obj) + assert_class_equal(l, r, exact=exact, obj=obj) + + # Skip exact dtype checking when `check_categorical` is False + if check_categorical: + assert_attr_equal('dtype', l, r, obj=obj) + # allow string-like to have different inferred_types if l.inferred_type in ('string', 'unicode'): assert r.inferred_type in ('string', 'unicode') @@ -829,7 +847,8 @@ def _get_ilevel_values(index, level): # get_level_values may change dtype _check_types(left.levels[level], right.levels[level], obj=obj) - if check_exact: + # skip exact index checking when `check_categorical` is False + if check_exact and check_categorical: if not left.equals(right): diff = np.sum((left.values != right.values) .astype(int)) * 100.0 / len(left) @@ -950,23 +969,23 @@ def is_sorted(seq): def assert_categorical_equal(left, right, check_dtype=True, - obj='Categorical', check_category_order=True): + check_category_order=True, obj='Categorical'): """Test that Categoricals are equivalent. Parameters ---------- - left, right : Categorical - Categoricals to compare + left : Categorical + right : Categorical check_dtype : bool, default True Check that integer dtype of the codes are the same - obj : str, default 'Categorical' - Specify object name being compared, internally used to show appropriate - assertion message check_category_order : bool, default True Whether the order of the categories should be compared, which implies identical integer codes. 
If False, only the resulting values are compared. The ordered attribute is checked regardless. + obj : str, default 'Categorical' + Specify object name being compared, internally used to show appropriate + assertion message """ _check_isinstance(left, right, Categorical) @@ -1020,7 +1039,7 @@ def raise_assert_detail(obj, message, left, right, diff=None): def assert_numpy_array_equal(left, right, strict_nan=False, check_dtype=True, err_msg=None, - obj='numpy array', check_same=None): + check_same=None, obj='numpy array'): """ Checks that 'np.ndarray' is equivalent Parameters @@ -1033,11 +1052,11 @@ def assert_numpy_array_equal(left, right, strict_nan=False, check dtype if both a and b are np.ndarray err_msg : str, default None If provided, used as assertion message + check_same : None|'copy'|'same', default None + Ensure left and right refer/do not refer to the same memory area obj : str, default 'numpy array' Specify object name being compared, internally used to show appropriate assertion message - check_same : None|'copy'|'same', default None - Ensure left and right refer/do not refer to the same memory area """ # instance validation diff --git a/setup.cfg b/setup.cfg index 6d9657737a8bd..9ec967c25e225 100644 --- a/setup.cfg +++ b/setup.cfg @@ -32,4 +32,5 @@ markers = slow: mark a test as slow network: mark a test as network high_memory: mark a test as a high-memory only -doctest_optionflags= NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL +addopts = --strict-data-files +doctest_optionflags= NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL \ No newline at end of file diff --git a/setup.py b/setup.py index 6febe674fb2a1..5d6bbbcf7b862 100755 --- a/setup.py +++ b/setup.py @@ -217,6 +217,7 @@ def build_extensions(self): 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', 'Programming Language :: Cython', 'Topic :: Scientific/Engineering'] @@ -453,10 +454,10 @@ 
def pxd(name): return pjoin('pandas', name + '.pxd') -# args to ignore warnings if is_platform_windows(): extra_compile_args = [] else: + # args to ignore warnings extra_compile_args = ['-Wno-unused-function'] lib_depends = lib_depends + ['pandas/_libs/src/numpy_helper.h', @@ -733,11 +734,7 @@ def pxd(name): maintainer=AUTHOR, version=versioneer.get_version(), packages=find_packages(include=['pandas', 'pandas.*']), - package_data={'': ['data/*', 'templates/*'], - 'pandas.tests.io': ['data/legacy_hdf/*.h5', - 'data/legacy_pickle/*/*.pickle', - 'data/legacy_msgpack/*/*.msgpack', - 'data/html_encoding/*.html']}, + package_data={'': ['templates/*', '_libs/*.dll']}, ext_modules=extensions, maintainer_email=EMAIL, description=DESCRIPTION,
XLSB support in read_excel() #8540
https://api.github.com/repos/pandas-dev/pandas/pulls/24658
2019-01-07T07:00:05Z
2019-01-07T16:13:19Z
null
2019-01-07T16:13:19Z
BUG: DataFrame/Series.tz_convert does not modifies original data with copy=False
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 4bc50695e1ecd..eb2c59f6a3519 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1596,6 +1596,7 @@ Timezones - Bug in :func:`to_datetime` where ``utc=True`` was not respected when passing a :class:`Timestamp` (:issue:`24415`) - Bug in :meth:`DataFrame.any` returns wrong value when ``axis=1`` and the data is of datetimelike type (:issue:`23070`) - Bug in :meth:`DatetimeIndex.to_period` where a timezone aware index was converted to UTC first before creating :class:`PeriodIndex` (:issue:`22905`) +- Bug in :meth:`DataFrame.tz_localize`, :meth:`DataFrame.tz_convert`, :meth:`Series.tz_localize`, and :meth:`Series.tz_convert` where ``copy=False`` would mutate the original argument inplace (:issue:`6326`) Offsets ^^^^^^^ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d271081aeaa51..1e6ae71660617 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -9226,7 +9226,7 @@ def _tz_convert(ax, tz): ax = _tz_convert(ax, tz) result = self._constructor(self._data, copy=copy) - result.set_axis(ax, axis=axis, inplace=True) + result = result.set_axis(ax, axis=axis, inplace=False) return result.__finalize__(self) def tz_localize(self, tz, axis=0, level=None, copy=True, @@ -9390,7 +9390,7 @@ def _tz_localize(ax, tz, ambiguous, nonexistent): ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self._constructor(self._data, copy=copy) - result.set_axis(ax, axis=axis, inplace=True) + result = result.set_axis(ax, axis=axis, inplace=False) return result.__finalize__(self) # ---------------------------------------------------------------------- diff --git a/pandas/tests/frame/test_timezones.py b/pandas/tests/frame/test_timezones.py index f124a4c3f3570..fd6587c73b8fa 100644 --- a/pandas/tests/frame/test_timezones.py +++ b/pandas/tests/frame/test_timezones.py @@ -180,3 +180,19 @@ def test_boolean_compare_transpose_tzindex_with_dst(self, 
tz): result = df.T == df.T expected = DataFrame(True, index=list('ab'), columns=idx) tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize('copy', [True, False]) + @pytest.mark.parametrize('method, tz', [ + ['tz_localize', None], + ['tz_convert', 'Europe/Berlin'] + ]) + def test_tz_localize_convert_copy_inplace_mutate(self, copy, method, tz): + # GH 6326 + result = DataFrame(np.arange(0, 5), + index=date_range('20131027', periods=5, + freq='1H', tz=tz)) + getattr(result, method)('UTC', copy=copy) + expected = DataFrame(np.arange(0, 5), + index=date_range('20131027', periods=5, + freq='1H', tz=tz)) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/test_timezones.py b/pandas/tests/series/test_timezones.py index 7f49f94ef57ce..ec644a8e93da2 100644 --- a/pandas/tests/series/test_timezones.py +++ b/pandas/tests/series/test_timezones.py @@ -348,3 +348,19 @@ def test_series_truncate_datetimeindex_tz(self): result = s.truncate(datetime(2005, 4, 2), datetime(2005, 4, 4)) expected = Series([1, 2, 3], index=idx[1:4]) tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('copy', [True, False]) + @pytest.mark.parametrize('method, tz', [ + ['tz_localize', None], + ['tz_convert', 'Europe/Berlin'] + ]) + def test_tz_localize_convert_copy_inplace_mutate(self, copy, method, tz): + # GH 6326 + result = Series(np.arange(0, 5), + index=date_range('20131027', periods=5, freq='1H', + tz=tz)) + getattr(result, method)('UTC', copy=copy) + expected = Series(np.arange(0, 5), + index=date_range('20131027', periods=5, freq='1H', + tz=tz)) + tm.assert_series_equal(result, expected)
- [x] closes #6326 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This also impacted `tz_localize` as well. Non blocking for 0.24
https://api.github.com/repos/pandas-dev/pandas/pulls/24657
2019-01-07T06:29:49Z
2019-01-08T00:27:59Z
2019-01-08T00:27:58Z
2020-10-29T21:23:11Z
STY: use pytest.raises context syntax
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index d175f669703c7..52dce572c6d4f 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -2720,7 +2720,12 @@ def test_format_percentiles(): expected = ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%'] assert result == expected - pytest.raises(ValueError, fmt.format_percentiles, [0.1, np.nan, 0.5]) - pytest.raises(ValueError, fmt.format_percentiles, [-0.001, 0.1, 0.5]) - pytest.raises(ValueError, fmt.format_percentiles, [2, 0.1, 0.5]) - pytest.raises(ValueError, fmt.format_percentiles, [0.1, 0.5, 'a']) + msg = r"percentiles should all be in the interval \[0,1\]" + with pytest.raises(ValueError, match=msg): + fmt.format_percentiles([0.1, np.nan, 0.5]) + with pytest.raises(ValueError, match=msg): + fmt.format_percentiles([-0.001, 0.1, 0.5]) + with pytest.raises(ValueError, match=msg): + fmt.format_percentiles([2, 0.1, 0.5]) + with pytest.raises(ValueError, match=msg): + fmt.format_percentiles([0.1, 0.5, 'a']) diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index fd0953a4834ca..3bf699cc8a1f0 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ -197,7 +197,9 @@ def test_meta_name_conflict(self): 'data': [{'foo': 'something', 'bar': 'else'}, {'foo': 'something2', 'bar': 'else2'}]}] - with pytest.raises(ValueError): + msg = (r"Conflicting metadata name (foo|bar)," + " need distinguishing prefix") + with pytest.raises(ValueError, match=msg): json_normalize(data, 'data', meta=['foo', 'bar']) result = json_normalize(data, 'data', meta=['foo', 'bar'], @@ -366,13 +368,15 @@ def test_json_normalize_errors(self): assert j.fillna('').to_dict() == expected - pytest.raises(KeyError, - json_normalize, data=i['Trades'], - record_path=[['general', 'stocks']], - meta=[['general', 'tradeid'], - ['general', 'trade_version']], - errors='raise' - ) + msg = ("Try 
running with errors='ignore' as key 'trade_version'" + " is not always present") + with pytest.raises(KeyError, match=msg): + json_normalize( + data=i['Trades'], + record_path=[['general', 'stocks']], + meta=[['general', 'tradeid'], + ['general', 'trade_version']], + errors='raise') def test_donot_drop_nonevalues(self): # GH21356 diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 5468413033002..155083900f83a 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -101,8 +101,12 @@ def test_frame_non_unique_index(self): df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 1], columns=['x', 'y']) - pytest.raises(ValueError, df.to_json, orient='index') - pytest.raises(ValueError, df.to_json, orient='columns') + msg = "DataFrame index must be unique for orient='index'" + with pytest.raises(ValueError, match=msg): + df.to_json(orient='index') + msg = "DataFrame index must be unique for orient='columns'" + with pytest.raises(ValueError, match=msg): + df.to_json(orient='columns') assert_frame_equal(df, read_json(df.to_json(orient='split'), orient='split')) @@ -116,9 +120,15 @@ def test_frame_non_unique_columns(self): df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 2], columns=['x', 'x']) - pytest.raises(ValueError, df.to_json, orient='index') - pytest.raises(ValueError, df.to_json, orient='columns') - pytest.raises(ValueError, df.to_json, orient='records') + msg = "DataFrame columns must be unique for orient='index'" + with pytest.raises(ValueError, match=msg): + df.to_json(orient='index') + msg = "DataFrame columns must be unique for orient='columns'" + with pytest.raises(ValueError, match=msg): + df.to_json(orient='columns') + msg = "DataFrame columns must be unique for orient='records'" + with pytest.raises(ValueError, match=msg): + df.to_json(orient='records') assert_frame_equal(df, read_json(df.to_json(orient='split'), orient='split', dtype=False)) @@ -156,13 +166,16 @@ def 
_check_orient(df, orient, dtype=None, numpy=False, # if we are not unique, then check that we are raising ValueError # for the appropriate orients if not df.index.is_unique and orient in ['index', 'columns']: - pytest.raises( - ValueError, lambda: df.to_json(orient=orient)) + msg = ("DataFrame index must be unique for orient='{}'" + .format(orient)) + with pytest.raises(ValueError, match=msg): + df.to_json(orient=orient) return if (not df.columns.is_unique and orient in ['index', 'columns', 'records']): - pytest.raises( - ValueError, lambda: df.to_json(orient=orient)) + # TODO: not executed. fix this. + with pytest.raises(ValueError, match='ksjkajksfjksjfkjs'): + df.to_json(orient=orient) return dfjson = df.to_json(orient=orient) @@ -326,21 +339,24 @@ def _check_all_orients(df, dtype=None, convert_axes=True, _check_orient(df.transpose().transpose(), "index", dtype=False) def test_frame_from_json_bad_data(self): - pytest.raises(ValueError, read_json, StringIO('{"key":b:a:d}')) + with pytest.raises(ValueError, match='Expected object or value'): + read_json(StringIO('{"key":b:a:d}')) # too few indices json = StringIO('{"columns":["A","B"],' '"index":["2","3"],' '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}') - pytest.raises(ValueError, read_json, json, - orient="split") + msg = r"Shape of passed values is \(2, 3\), indices imply \(2, 2\)" + with pytest.raises(ValueError, match=msg): + read_json(json, orient="split") # too many columns json = StringIO('{"columns":["A","B","C"],' '"index":["1","2","3"],' '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}') - pytest.raises(AssertionError, read_json, json, - orient="split") + msg = "3 columns passed, passed data had 2 columns" + with pytest.raises(AssertionError, match=msg): + read_json(json, orient="split") # bad key json = StringIO('{"badkey":["A","B"],' @@ -414,7 +430,9 @@ def test_frame_to_json_float_precision(self): def test_frame_to_json_except(self): df = DataFrame([1, 2, 3]) - pytest.raises(ValueError, df.to_json, 
orient="garbage") + msg = "Invalid value 'garbage' for option 'orient'" + with pytest.raises(ValueError, match=msg): + df.to_json(orient="garbage") def test_frame_empty(self): df = DataFrame(columns=['jim', 'joe']) @@ -540,7 +558,8 @@ def __str__(self): # check if non-printable content throws appropriate Exception df_nonprintable = DataFrame({'A': [binthing]}) - with pytest.raises(OverflowError): + msg = "Unsupported UTF-8 sequence length when encoding string" + with pytest.raises(OverflowError, match=msg): df_nonprintable.to_json() # the same with multiple columns threw segfaults @@ -565,7 +584,9 @@ def test_label_overflow(self): def test_series_non_unique_index(self): s = Series(['a', 'b'], index=[1, 1]) - pytest.raises(ValueError, s.to_json, orient='index') + msg = "Series index must be unique for orient='index'" + with pytest.raises(ValueError, match=msg): + s.to_json(orient='index') assert_series_equal(s, read_json(s.to_json(orient='split'), orient='split', typ='series')) @@ -637,7 +658,9 @@ def _check_all_orients(series, dtype=None, check_index_type=True): def test_series_to_json_except(self): s = Series([1, 2, 3]) - pytest.raises(ValueError, s.to_json, orient="garbage") + msg = "Invalid value 'garbage' for option 'orient'" + with pytest.raises(ValueError, match=msg): + s.to_json(orient="garbage") def test_series_from_json_precise_float(self): s = Series([4.56, 4.56, 4.56]) @@ -752,8 +775,9 @@ def test_w_date(date, date_unit=None): test_w_date('20130101 20:43:42.123456', date_unit='us') test_w_date('20130101 20:43:42.123456789', date_unit='ns') - pytest.raises(ValueError, df.to_json, date_format='iso', - date_unit='foo') + msg = "Invalid value 'foo' for option 'date_unit'" + with pytest.raises(ValueError, match=msg): + df.to_json(date_format='iso', date_unit='foo') def test_date_format_series(self): def test_w_date(date, date_unit=None): @@ -774,8 +798,9 @@ def test_w_date(date, date_unit=None): test_w_date('20130101 20:43:42.123456789', date_unit='ns') ts = 
Series(Timestamp('20130101 20:43:42.123'), index=self.ts.index) - pytest.raises(ValueError, ts.to_json, date_format='iso', - date_unit='foo') + msg = "Invalid value 'foo' for option 'date_unit'" + with pytest.raises(ValueError, match=msg): + ts.to_json(date_format='iso', date_unit='foo') def test_date_unit(self): df = self.tsframe.copy() @@ -940,14 +965,16 @@ def test_default_handler_numpy_unsupported_dtype(self): assert df.to_json(default_handler=str, orient="values") == expected def test_default_handler_raises(self): + msg = "raisin" + def my_handler_raises(obj): - raise TypeError("raisin") - pytest.raises(TypeError, - DataFrame({'a': [1, 2, object()]}).to_json, - default_handler=my_handler_raises) - pytest.raises(TypeError, - DataFrame({'a': [1, 2, complex(4, -5)]}).to_json, - default_handler=my_handler_raises) + raise TypeError(msg) + with pytest.raises(TypeError, match=msg): + DataFrame({'a': [1, 2, object()]}).to_json( + default_handler=my_handler_raises) + with pytest.raises(TypeError, match=msg): + DataFrame({'a': [1, 2, complex(4, -5)]}).to_json( + default_handler=my_handler_raises) def test_categorical(self): # GH4377 df.to_json segfaults with non-ndarray blocks diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index 7f5241def597f..63ba9bc0f0488 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -422,7 +422,9 @@ def test_datetime_units(self): roundtrip = ujson.decode(ujson.encode(val, date_unit='ns')) assert roundtrip == stamp.value - pytest.raises(ValueError, ujson.encode, val, date_unit='foo') + msg = "Invalid value 'foo' for option 'date_unit'" + with pytest.raises(ValueError, match=msg): + ujson.encode(val, date_unit='foo') def test_encode_to_utf8(self): unencoded = "\xe6\x97\xa5\xd1\x88" @@ -695,7 +697,9 @@ def recursive_attr(self): def __str__(self): return str(self.val) - pytest.raises(OverflowError, ujson.encode, _TestObject("foo")) + msg = "Maximum recursion level 
reached" + with pytest.raises(OverflowError, match=msg): + ujson.encode(_TestObject("foo")) assert '"foo"' == ujson.encode(_TestObject("foo"), default_handler=str) diff --git a/pandas/tests/io/msgpack/test_except.py b/pandas/tests/io/msgpack/test_except.py index d670e846c382a..cd894109e989f 100644 --- a/pandas/tests/io/msgpack/test_except.py +++ b/pandas/tests/io/msgpack/test_except.py @@ -22,16 +22,16 @@ def test_raise_from_object_hook(self): def hook(_): raise DummyException() - pytest.raises(DummyException, unpackb, packb({}), object_hook=hook) - pytest.raises(DummyException, unpackb, packb({'fizz': 'buzz'}), - object_hook=hook) - pytest.raises(DummyException, unpackb, packb({'fizz': 'buzz'}), - object_pairs_hook=hook) - pytest.raises(DummyException, unpackb, - packb({'fizz': {'buzz': 'spam'}}), object_hook=hook) - pytest.raises(DummyException, unpackb, - packb({'fizz': {'buzz': 'spam'}}), - object_pairs_hook=hook) + with pytest.raises(DummyException): + unpackb(packb({}), object_hook=hook) + with pytest.raises(DummyException): + unpackb(packb({'fizz': 'buzz'}), object_hook=hook) + with pytest.raises(DummyException): + unpackb(packb({'fizz': 'buzz'}), object_pairs_hook=hook) + with pytest.raises(DummyException): + unpackb(packb({'fizz': {'buzz': 'spam'}}), object_hook=hook) + with pytest.raises(DummyException): + unpackb(packb({'fizz': {'buzz': 'spam'}}), object_pairs_hook=hook) def test_invalid_value(self): msg = "Unpack failed: error" diff --git a/pandas/tests/io/msgpack/test_limits.py b/pandas/tests/io/msgpack/test_limits.py index cad51da483c71..dd8dc8da607a4 100644 --- a/pandas/tests/io/msgpack/test_limits.py +++ b/pandas/tests/io/msgpack/test_limits.py @@ -12,22 +12,26 @@ class TestLimits(object): def test_integer(self): x = -(2 ** 63) assert unpackb(packb(x)) == x - pytest.raises((OverflowError, ValueError), packb, x - 1) + msg = (r"((long |Python )?(int )?too (big|large) to convert" + r"( to C (unsigned )?long))?") + with pytest.raises((OverflowError, 
ValueError), match=msg): + packb(x - 1) x = 2 ** 64 - 1 assert unpackb(packb(x)) == x - pytest.raises((OverflowError, ValueError), packb, x + 1) + with pytest.raises((OverflowError, ValueError), match=msg): + packb(x + 1) def test_array_header(self): packer = Packer() packer.pack_array_header(2 ** 32 - 1) - pytest.raises((OverflowError, ValueError), - packer.pack_array_header, 2 ** 32) + with pytest.raises((OverflowError, ValueError)): + packer.pack_array_header(2 ** 32) def test_map_header(self): packer = Packer() packer.pack_map_header(2 ** 32 - 1) - pytest.raises((OverflowError, ValueError), - packer.pack_array_header, 2 ** 32) + with pytest.raises((OverflowError, ValueError)): + packer.pack_array_header(2 ** 32) def test_max_str_len(self): d = 'x' * 3 diff --git a/pandas/tests/io/msgpack/test_obj.py b/pandas/tests/io/msgpack/test_obj.py index 4a6b89907954e..471212f1bfe32 100644 --- a/pandas/tests/io/msgpack/test_obj.py +++ b/pandas/tests/io/msgpack/test_obj.py @@ -47,31 +47,28 @@ def test_decode_pairs_hook(self): assert unpacked[1] == prod_sum def test_only_one_obj_hook(self): - pytest.raises(TypeError, unpackb, b'', object_hook=lambda x: x, - object_pairs_hook=lambda x: x) + msg = "object_pairs_hook and object_hook are mutually exclusive" + with pytest.raises(TypeError, match=msg): + unpackb(b'', object_hook=lambda x: x, + object_pairs_hook=lambda x: x) def test_bad_hook(self): - def f(): + msg = r"can't serialize \(1\+2j\)" + with pytest.raises(TypeError, match=msg): packed = packb([3, 1 + 2j], default=lambda o: o) unpacked = unpackb(packed, use_list=1) # noqa - pytest.raises(TypeError, f) - def test_array_hook(self): packed = packb([1, 2, 3]) unpacked = unpackb(packed, list_hook=self._arr_to_str, use_list=1) assert unpacked == '123' def test_an_exception_in_objecthook1(self): - def f(): + with pytest.raises(DecodeError, match='Ooops!'): packed = packb({1: {'__complex__': True, 'real': 1, 'imag': 2}}) unpackb(packed, object_hook=self.bad_complex_decoder) - 
pytest.raises(DecodeError, f) - def test_an_exception_in_objecthook2(self): - def f(): + with pytest.raises(DecodeError, match='Ooops!'): packed = packb({1: [{'__complex__': True, 'real': 1, 'imag': 2}]}) unpackb(packed, list_hook=self.bad_complex_decoder, use_list=1) - - pytest.raises(DecodeError, f) diff --git a/pandas/tests/io/msgpack/test_pack.py b/pandas/tests/io/msgpack/test_pack.py index f69ac0a0bc4ce..8c82d0d2cf870 100644 --- a/pandas/tests/io/msgpack/test_pack.py +++ b/pandas/tests/io/msgpack/test_pack.py @@ -67,12 +67,17 @@ def testIgnoreUnicodeErrors(self): assert re == "abcdef" def testStrictUnicodeUnpack(self): - pytest.raises(UnicodeDecodeError, unpackb, packb(b'abc\xeddef'), - encoding='utf-8', use_list=1) + msg = (r"'utf-*8' codec can't decode byte 0xed in position 3:" + " invalid continuation byte") + with pytest.raises(UnicodeDecodeError, match=msg): + unpackb(packb(b'abc\xeddef'), encoding='utf-8', use_list=1) def testStrictUnicodePack(self): - pytest.raises(UnicodeEncodeError, packb, compat.u("abc\xeddef"), - encoding='ascii', unicode_errors='strict') + msg = (r"'ascii' codec can't encode character u*'\\xed' in position 3:" + r" ordinal not in range\(128\)") + with pytest.raises(UnicodeEncodeError, match=msg): + packb(compat.u("abc\xeddef"), encoding='ascii', + unicode_errors='strict') def testIgnoreErrorsPack(self): re = unpackb( @@ -82,7 +87,9 @@ def testIgnoreErrorsPack(self): assert re == compat.u("abcdef") def testNoEncoding(self): - pytest.raises(TypeError, packb, compat.u("abc"), encoding=None) + msg = "Can't encode unicode string: no encoding is specified" + with pytest.raises(TypeError, match=msg): + packb(compat.u("abc"), encoding=None) def testDecodeBinary(self): re = unpackb(packb("abc"), encoding=None, use_list=1)
xref #24332
https://api.github.com/repos/pandas-dev/pandas/pulls/24655
2019-01-06T22:27:43Z
2019-01-08T12:59:42Z
2019-01-08T12:59:42Z
2019-01-08T19:52:19Z
TST/CI: disable hypothesis deadline for test_tick_add_sub
diff --git a/pandas/tests/tseries/offsets/test_ticks.py b/pandas/tests/tseries/offsets/test_ticks.py index a1940241b4c56..dcc7afa797063 100644 --- a/pandas/tests/tseries/offsets/test_ticks.py +++ b/pandas/tests/tseries/offsets/test_ticks.py @@ -4,7 +4,7 @@ """ from datetime import datetime, timedelta -from hypothesis import assume, example, given, strategies as st +from hypothesis import assume, example, given, settings, strategies as st import numpy as np import pytest @@ -38,6 +38,7 @@ def test_delta_to_tick(): @pytest.mark.parametrize('cls', tick_classes) +@settings(deadline=None) # GH 24641 @example(n=2, m=3) @example(n=800, m=300) @example(n=1000, m=5)
- [x] closes #24641 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24654
2019-01-06T17:09:53Z
2019-01-06T19:19:00Z
2019-01-06T19:19:00Z
2019-01-06T19:19:05Z
REF: clear out a bunch of algos, de-duplicate a bunch of core.missing
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index a064aec492df2..b3c519ab99b6e 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -369,31 +369,6 @@ ctypedef fused algos_t: uint8_t -# TODO: unused; needed? -@cython.wraparound(False) -@cython.boundscheck(False) -cpdef map_indices(ndarray[algos_t] index): - """ - Produce a dict mapping the values of the input array to their respective - locations. - - Example: - array(['hi', 'there']) --> {'hi' : 0 , 'there' : 1} - - Better to do this with Cython because of the enormous speed boost. - """ - cdef: - Py_ssize_t i, length - dict result = {} - - length = len(index) - - for i in range(length): - result[index[i]] = i - - return result - - @cython.boundscheck(False) @cython.wraparound(False) def pad(ndarray[algos_t] old, ndarray[algos_t] new, limit=None): @@ -458,20 +433,6 @@ def pad(ndarray[algos_t] old, ndarray[algos_t] new, limit=None): return indexer -pad_float64 = pad["float64_t"] -pad_float32 = pad["float32_t"] -pad_object = pad["object"] -pad_int64 = pad["int64_t"] -pad_int32 = pad["int32_t"] -pad_int16 = pad["int16_t"] -pad_int8 = pad["int8_t"] -pad_uint64 = pad["uint64_t"] -pad_uint32 = pad["uint32_t"] -pad_uint16 = pad["uint16_t"] -pad_uint8 = pad["uint8_t"] -pad_bool = pad["uint8_t"] - - @cython.boundscheck(False) @cython.wraparound(False) def pad_inplace(ndarray[algos_t] values, @@ -509,15 +470,6 @@ def pad_inplace(ndarray[algos_t] values, val = values[i] -pad_inplace_float64 = pad_inplace["float64_t"] -pad_inplace_float32 = pad_inplace["float32_t"] -pad_inplace_object = pad_inplace["object"] -pad_inplace_int64 = pad_inplace["int64_t"] -pad_inplace_int32 = pad_inplace["int32_t"] -pad_inplace_uint64 = pad_inplace["uint64_t"] -pad_inplace_bool = pad_inplace["uint8_t"] - - @cython.boundscheck(False) @cython.wraparound(False) def pad_2d_inplace(ndarray[algos_t, ndim=2] values, @@ -557,15 +509,6 @@ def pad_2d_inplace(ndarray[algos_t, ndim=2] values, val = values[j, i] 
-pad_2d_inplace_float64 = pad_2d_inplace["float64_t"] -pad_2d_inplace_float32 = pad_2d_inplace["float32_t"] -pad_2d_inplace_object = pad_2d_inplace["object"] -pad_2d_inplace_int64 = pad_2d_inplace["int64_t"] -pad_2d_inplace_int32 = pad_2d_inplace["int32_t"] -pad_2d_inplace_uint64 = pad_2d_inplace["uint64_t"] -pad_2d_inplace_bool = pad_2d_inplace["uint8_t"] - - """ Backfilling logic for generating fill vector @@ -657,20 +600,6 @@ def backfill(ndarray[algos_t] old, ndarray[algos_t] new, limit=None): return indexer -backfill_float64 = backfill["float64_t"] -backfill_float32 = backfill["float32_t"] -backfill_object = backfill["object"] -backfill_int64 = backfill["int64_t"] -backfill_int32 = backfill["int32_t"] -backfill_int16 = backfill["int16_t"] -backfill_int8 = backfill["int8_t"] -backfill_uint64 = backfill["uint64_t"] -backfill_uint32 = backfill["uint32_t"] -backfill_uint16 = backfill["uint16_t"] -backfill_uint8 = backfill["uint8_t"] -backfill_bool = backfill["uint8_t"] - - @cython.boundscheck(False) @cython.wraparound(False) def backfill_inplace(ndarray[algos_t] values, @@ -708,15 +637,6 @@ def backfill_inplace(ndarray[algos_t] values, val = values[i] -backfill_inplace_float64 = backfill_inplace["float64_t"] -backfill_inplace_float32 = backfill_inplace["float32_t"] -backfill_inplace_object = backfill_inplace["object"] -backfill_inplace_int64 = backfill_inplace["int64_t"] -backfill_inplace_int32 = backfill_inplace["int32_t"] -backfill_inplace_uint64 = backfill_inplace["uint64_t"] -backfill_inplace_bool = backfill_inplace["uint8_t"] - - @cython.boundscheck(False) @cython.wraparound(False) def backfill_2d_inplace(ndarray[algos_t, ndim=2] values, @@ -756,15 +676,6 @@ def backfill_2d_inplace(ndarray[algos_t, ndim=2] values, val = values[j, i] -backfill_2d_inplace_float64 = backfill_2d_inplace["float64_t"] -backfill_2d_inplace_float32 = backfill_2d_inplace["float32_t"] -backfill_2d_inplace_object = backfill_2d_inplace["object"] -backfill_2d_inplace_int64 = 
backfill_2d_inplace["int64_t"] -backfill_2d_inplace_int32 = backfill_2d_inplace["int32_t"] -backfill_2d_inplace_uint64 = backfill_2d_inplace["uint64_t"] -backfill_2d_inplace_bool = backfill_2d_inplace["uint8_t"] - - @cython.wraparound(False) @cython.boundscheck(False) def arrmap(ndarray[algos_t] index, object func): @@ -875,20 +786,6 @@ def is_monotonic(ndarray[algos_t, ndim=1] arr, bint timelike): return is_monotonic_inc, is_monotonic_dec, is_strict_monotonic -is_monotonic_float64 = is_monotonic["float64_t"] -is_monotonic_float32 = is_monotonic["float32_t"] -is_monotonic_object = is_monotonic["object"] -is_monotonic_int64 = is_monotonic["int64_t"] -is_monotonic_int32 = is_monotonic["int32_t"] -is_monotonic_int16 = is_monotonic["int16_t"] -is_monotonic_int8 = is_monotonic["int8_t"] -is_monotonic_uint64 = is_monotonic["uint64_t"] -is_monotonic_uint32 = is_monotonic["uint32_t"] -is_monotonic_uint16 = is_monotonic["uint16_t"] -is_monotonic_uint8 = is_monotonic["uint8_t"] -is_monotonic_bool = is_monotonic["uint8_t"] - - # generated from template include "algos_common_helper.pxi" include "algos_rank_helper.pxi" diff --git a/pandas/_libs/algos_common_helper.pxi.in b/pandas/_libs/algos_common_helper.pxi.in index 7d9ba420525c8..91599fa223b57 100644 --- a/pandas/_libs/algos_common_helper.pxi.in +++ b/pandas/_libs/algos_common_helper.pxi.in @@ -70,18 +70,6 @@ def diff_2d_{{name}}(ndarray[{{c_type}}, ndim=2] arr, for j in range(start, stop): out[i, j] = arr[i, j] - arr[i, j - periods] - -def put2d_{{name}}_{{dest_name}}(ndarray[{{c_type}}, ndim=2, cast=True] values, - ndarray[int64_t] indexer, Py_ssize_t loc, - ndarray[{{dest_type}}] out): - cdef: - Py_ssize_t i, j, k - - k = len(values) - for j in range(k): - i = indexer[j] - out[i] = values[j, loc] - {{endfor}} # ---------------------------------------------------------------------- diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 365713d579d60..c919086701536 100644 --- a/pandas/_libs/index.pyx +++ 
b/pandas/_libs/index.pyx @@ -392,7 +392,7 @@ cdef class DatetimeEngine(Int64Engine): return self.vgetter().view('i8') def _call_monotonic(self, values): - return algos.is_monotonic_int64(values, timelike=True) + return algos.is_monotonic(values, timelike=True) cpdef get_loc(self, object val): if is_definitely_invalid_key(val): @@ -451,14 +451,13 @@ cdef class DatetimeEngine(Int64Engine): if other.dtype != self._get_box_dtype(): return np.repeat(-1, len(other)).astype('i4') other = np.asarray(other).view('i8') - return algos.pad_int64(self._get_index_values(), other, limit=limit) + return algos.pad(self._get_index_values(), other, limit=limit) def get_backfill_indexer(self, other, limit=None): if other.dtype != self._get_box_dtype(): return np.repeat(-1, len(other)).astype('i4') other = np.asarray(other).view('i8') - return algos.backfill_int64(self._get_index_values(), other, - limit=limit) + return algos.backfill(self._get_index_values(), other, limit=limit) cdef class TimedeltaEngine(DatetimeEngine): @@ -492,15 +491,15 @@ cdef class PeriodEngine(Int64Engine): freq = super(PeriodEngine, self).vgetter().freq ordinal = periodlib.extract_ordinals(other, freq) - return algos.pad_int64(self._get_index_values(), - np.asarray(ordinal), limit=limit) + return algos.pad(self._get_index_values(), + np.asarray(ordinal), limit=limit) def get_backfill_indexer(self, other, limit=None): freq = super(PeriodEngine, self).vgetter().freq ordinal = periodlib.extract_ordinals(other, freq) - return algos.backfill_int64(self._get_index_values(), - np.asarray(ordinal), limit=limit) + return algos.backfill(self._get_index_values(), + np.asarray(ordinal), limit=limit) def get_indexer_non_unique(self, targets): freq = super(PeriodEngine, self).vgetter().freq diff --git a/pandas/_libs/index_class_helper.pxi.in b/pandas/_libs/index_class_helper.pxi.in index b393283bfd4ca..6383c1534fb44 100644 --- a/pandas/_libs/index_class_helper.pxi.in +++ b/pandas/_libs/index_class_helper.pxi.in @@ -31,15 
+31,13 @@ dtypes = [('Float64', 'float64', 'float64_t', 'Float64', 'float64'), cdef class {{name}}Engine(IndexEngine): def _call_monotonic(self, values): - return algos.is_monotonic_{{dtype}}(values, timelike=False) + return algos.is_monotonic(values, timelike=False) def get_backfill_indexer(self, other, limit=None): - return algos.backfill_{{dtype}}(self._get_index_values(), - other, limit=limit) + return algos.backfill(self._get_index_values(), other, limit=limit) def get_pad_indexer(self, other, limit=None): - return algos.pad_{{dtype}}(self._get_index_values(), - other, limit=limit) + return algos.pad(self._get_index_values(), other, limit=limit) cdef _make_hash_table(self, n): return _hash.{{hashtable_name}}HashTable(n) diff --git a/pandas/core/missing.py b/pandas/core/missing.py index ee9aa9e229126..15538b8196684 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -452,107 +452,56 @@ def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None, return values -def _interp_wrapper(f, wrap_dtype, na_override=None): - def wrapper(arr, mask, limit=None): - view = arr.view(wrap_dtype) - f(view, mask, limit=limit) - - return wrapper - - -_pad_1d_datetime = _interp_wrapper(algos.pad_inplace_int64, np.int64) -_pad_2d_datetime = _interp_wrapper(algos.pad_2d_inplace_int64, np.int64) -_backfill_1d_datetime = _interp_wrapper(algos.backfill_inplace_int64, np.int64) -_backfill_2d_datetime = _interp_wrapper(algos.backfill_2d_inplace_int64, - np.int64) +def _cast_values_for_fillna(values, dtype): + """ + Cast values to a dtype that algos.pad and algos.backfill can handle. + """ + # TODO: for int-dtypes we make a copy, but for everything else this + # alters the values in-place. Is this intentional? 
+ if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or + is_timedelta64_dtype(dtype)): + values = values.view(np.int64) -def pad_1d(values, limit=None, mask=None, dtype=None): - if dtype is None: - dtype = values.dtype - _method = None - if is_float_dtype(values): - name = 'pad_inplace_{name}'.format(name=dtype.name) - _method = getattr(algos, name, None) - elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): - _method = _pad_1d_datetime elif is_integer_dtype(values): + # NB: this check needs to come after the datetime64 check above values = ensure_float64(values) - _method = algos.pad_inplace_float64 - elif values.dtype == np.object_: - _method = algos.pad_inplace_object - elif is_timedelta64_dtype(values): - # NaTs are treated identically to datetime64, so we can dispatch - # to that implementation - _method = _pad_1d_datetime - - if _method is None: - raise ValueError('Invalid dtype for pad_1d [{name}]' - .format(name=dtype.name)) - if mask is None: - mask = isna(values) - mask = mask.view(np.uint8) - _method(values, mask, limit=limit) return values -def backfill_1d(values, limit=None, mask=None, dtype=None): +def _fillna_prep(values, mask=None, dtype=None): + # boilerplate for pad_1d, backfill_1d, pad_2d, backfill_2d if dtype is None: dtype = values.dtype - _method = None - if is_float_dtype(values): - name = 'backfill_inplace_{name}'.format(name=dtype.name) - _method = getattr(algos, name, None) - elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): - _method = _backfill_1d_datetime - elif is_integer_dtype(values): - values = ensure_float64(values) - _method = algos.backfill_inplace_float64 - elif values.dtype == np.object_: - _method = algos.backfill_inplace_object - elif is_timedelta64_dtype(values): - # NaTs are treated identically to datetime64, so we can dispatch - # to that implementation - _method = _backfill_1d_datetime - - if _method is None: - raise ValueError('Invalid dtype for backfill_1d [{name}]' - 
.format(name=dtype.name)) if mask is None: + # This needs to occur before datetime/timedeltas are cast to int64 mask = isna(values) + + values = _cast_values_for_fillna(values, dtype) + mask = mask.view(np.uint8) + return values, mask + - _method(values, mask, limit=limit) +def pad_1d(values, limit=None, mask=None, dtype=None): + values, mask = _fillna_prep(values, mask, dtype) + algos.pad_inplace(values, mask, limit=limit) return values -def pad_2d(values, limit=None, mask=None, dtype=None): - if dtype is None: - dtype = values.dtype - _method = None - if is_float_dtype(values): - name = 'pad_2d_inplace_{name}'.format(name=dtype.name) - _method = getattr(algos, name, None) - elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): - _method = _pad_2d_datetime - elif is_integer_dtype(values): - values = ensure_float64(values) - _method = algos.pad_2d_inplace_float64 - elif values.dtype == np.object_: - _method = algos.pad_2d_inplace_object +def backfill_1d(values, limit=None, mask=None, dtype=None): + values, mask = _fillna_prep(values, mask, dtype) + algos.backfill_inplace(values, mask, limit=limit) + return values - if _method is None: - raise ValueError('Invalid dtype for pad_2d [{name}]' - .format(name=dtype.name)) - if mask is None: - mask = isna(values) - mask = mask.view(np.uint8) +def pad_2d(values, limit=None, mask=None, dtype=None): + values, mask = _fillna_prep(values, mask, dtype) if np.all(values.shape): - _method(values, mask, limit=limit) + algos.pad_2d_inplace(values, mask, limit=limit) else: # for test coverage pass @@ -560,30 +509,10 @@ def pad_2d(values, limit=None, mask=None, dtype=None): def backfill_2d(values, limit=None, mask=None, dtype=None): - if dtype is None: - dtype = values.dtype - _method = None - if is_float_dtype(values): - name = 'backfill_2d_inplace_{name}'.format(name=dtype.name) - _method = getattr(algos, name, None) - elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): - _method = _backfill_2d_datetime - 
elif is_integer_dtype(values): - values = ensure_float64(values) - _method = algos.backfill_2d_inplace_float64 - elif values.dtype == np.object_: - _method = algos.backfill_2d_inplace_object - - if _method is None: - raise ValueError('Invalid dtype for backfill_2d [{name}]' - .format(name=dtype.name)) - - if mask is None: - mask = isna(values) - mask = mask.view(np.uint8) + values, mask = _fillna_prep(values, mask, dtype) if np.all(values.shape): - _method(values, mask, limit=limit) + algos.backfill_2d_inplace(values, mask, limit=limit) else: # for test coverage pass diff --git a/pandas/tests/indexing/test_indexing_engines.py b/pandas/tests/indexing/test_indexing_engines.py index dcdfbcb7fbea2..57b85fd46a44e 100644 --- a/pandas/tests/indexing/test_indexing_engines.py +++ b/pandas/tests/indexing/test_indexing_engines.py @@ -155,7 +155,7 @@ def test_get_backfill_indexer(self): new = np.array(list('abcdefghij'), dtype=self.dtype) result = engine.get_backfill_indexer(new) - expected = libalgos.backfill_object(arr, new) + expected = libalgos.backfill["object"](arr, new) tm.assert_numpy_array_equal(result, expected) def test_get_pad_indexer(self): @@ -165,5 +165,5 @@ def test_get_pad_indexer(self): new = np.array(list('abcdefghij'), dtype=self.dtype) result = engine.get_pad_indexer(new) - expected = libalgos.pad_object(arr, new) + expected = libalgos.pad["object"](arr, new) tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 42e9b1f5af8ad..5951f5802f50e 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1491,19 +1491,19 @@ def test_pad_backfill_object_segfault(): old = np.array([], dtype='O') new = np.array([datetime(2010, 12, 31)], dtype='O') - result = libalgos.pad_object(old, new) + result = libalgos.pad["object"](old, new) expected = np.array([-1], dtype=np.int64) tm.assert_numpy_array_equal(result, expected) - result = libalgos.pad_object(new, old) + result = 
libalgos.pad["object"](new, old) expected = np.array([], dtype=np.int64) tm.assert_numpy_array_equal(result, expected) - result = libalgos.backfill_object(old, new) + result = libalgos.backfill["object"](old, new) expected = np.array([-1], dtype=np.int64) tm.assert_numpy_array_equal(result, expected) - result = libalgos.backfill_object(new, old) + result = libalgos.backfill["object"](new, old) expected = np.array([], dtype=np.int64) tm.assert_numpy_array_equal(result, expected) @@ -1535,7 +1535,7 @@ def test_backfill(self): old = Index([1, 5, 10]) new = Index(lrange(12)) - filler = libalgos.backfill_int64(old.values, new.values) + filler = libalgos.backfill["int64_t"](old.values, new.values) expect_filler = np.array([0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, -1], dtype=np.int64) @@ -1544,7 +1544,7 @@ def test_backfill(self): # corner case old = Index([1, 4]) new = Index(lrange(5, 10)) - filler = libalgos.backfill_int64(old.values, new.values) + filler = libalgos.backfill["int64_t"](old.values, new.values) expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64) tm.assert_numpy_array_equal(filler, expect_filler) @@ -1553,7 +1553,7 @@ def test_pad(self): old = Index([1, 5, 10]) new = Index(lrange(12)) - filler = libalgos.pad_int64(old.values, new.values) + filler = libalgos.pad["int64_t"](old.values, new.values) expect_filler = np.array([-1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2], dtype=np.int64) @@ -1562,7 +1562,7 @@ def test_pad(self): # corner case old = Index([5, 10]) new = Index(lrange(5)) - filler = libalgos.pad_int64(old.values, new.values) + filler = libalgos.pad["int64_t"](old.values, new.values) expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64) tm.assert_numpy_array_equal(filler, expect_filler)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24652
2019-01-06T15:56:29Z
2019-01-07T22:04:53Z
2019-01-07T22:04:53Z
2019-01-07T22:04:56Z
REF: io/formats/html.py (and io/formats/format.py)
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index f8ee9c273fd59..bdeed58d856cc 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -730,15 +730,14 @@ def to_html(self, classes=None, notebook=False, border=None): .. versionadded:: 0.19.0 """ - from pandas.io.formats.html import HTMLFormatter - html_renderer = HTMLFormatter(self, classes=classes, notebook=notebook, - border=border, table_id=self.table_id, - render_links=self.render_links) + from pandas.io.formats.html import HTMLFormatter, NotebookFormatter + Klass = NotebookFormatter if notebook else HTMLFormatter + html = Klass(self, classes=classes, border=border).render() if hasattr(self.buf, 'write'): - html_renderer.write_result(self.buf) + buffer_put_lines(self.buf, html) elif isinstance(self.buf, compat.string_types): with open(self.buf, 'w') as f: - html_renderer.write_result(f) + buffer_put_lines(f, html) else: raise TypeError('buf is not a file name and it has no write ' ' method') diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index 90f1dbe704806..f41749e0a7745 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -16,17 +16,23 @@ from pandas.core.config import get_option from pandas.io.common import _is_url -from pandas.io.formats.format import ( - TableFormatter, buffer_put_lines, get_level_lengths) +from pandas.io.formats.format import TableFormatter, get_level_lengths from pandas.io.formats.printing import pprint_thing class HTMLFormatter(TableFormatter): + """ + Internal class for formatting output data in html. + This class is intended for shared functionality between + DataFrame.to_html() and DataFrame._repr_html_(). + Any logic in common with other output formatting methods + should ideally be inherited from classes in format.py + and this class responsible for only producing html markup. 
+ """ indent_delta = 2 - def __init__(self, formatter, classes=None, notebook=False, border=None, - table_id=None, render_links=False): + def __init__(self, formatter, classes=None, border=None): self.fmt = formatter self.classes = classes @@ -36,12 +42,11 @@ def __init__(self, formatter, classes=None, notebook=False, border=None, self.bold_rows = self.fmt.kwds.get('bold_rows', False) self.escape = self.fmt.kwds.get('escape', True) self.show_dimensions = self.fmt.show_dimensions - self.notebook = notebook if border is None: border = get_option('display.html.border') self.border = border - self.table_id = table_id - self.render_links = render_links + self.table_id = self.fmt.table_id + self.render_links = self.fmt.render_links @property def show_row_idx_names(self): @@ -137,48 +142,7 @@ def write_tr(self, line, indent=0, indent_delta=0, header=False, indent -= indent_delta self.write('</tr>', indent) - def write_style(self): - # We use the "scoped" attribute here so that the desired - # style properties for the data frame are not then applied - # throughout the entire notebook. 
- template_first = """\ - <style scoped>""" - template_last = """\ - </style>""" - template_select = """\ - .dataframe %s { - %s: %s; - }""" - element_props = [('tbody tr th:only-of-type', - 'vertical-align', - 'middle'), - ('tbody tr th', - 'vertical-align', - 'top')] - if isinstance(self.columns, ABCMultiIndex): - element_props.append(('thead tr th', - 'text-align', - 'left')) - if self.show_row_idx_names: - element_props.append(('thead tr:last-of-type th', - 'text-align', - 'right')) - else: - element_props.append(('thead th', - 'text-align', - 'right')) - template_mid = '\n\n'.join(map(lambda t: template_select % t, - element_props)) - template = dedent('\n'.join((template_first, - template_mid, - template_last))) - self.write(template) - - def write_result(self, buf): - if self.notebook: - self.write('<div>') - self.write_style() - + def render(self): self._write_table() if self.should_show_dimensions: @@ -188,10 +152,7 @@ def write_result(self, buf): by=by, cols=len(self.frame.columns))) - if self.notebook: - self.write('</div>') - - buffer_put_lines(buf, self.elements) + return self.elements def _write_table(self, indent=0): _classes = ['dataframe'] # Default class. @@ -516,3 +477,55 @@ def _write_hierarchical_rows(self, fmt_values, indent): row.insert(self.row_levels + self.fmt.tr_col_num, '...') self.write_tr(row, indent, self.indent_delta, tags=None, nindex_levels=frame.index.nlevels) + + +class NotebookFormatter(HTMLFormatter): + """ + Internal class for formatting output data in html for display in Jupyter + Notebooks. This class is intended for functionality specific to + DataFrame._repr_html_() and DataFrame.to_html(notebook=True) + """ + + def write_style(self): + # We use the "scoped" attribute here so that the desired + # style properties for the data frame are not then applied + # throughout the entire notebook. 
+ template_first = """\ + <style scoped>""" + template_last = """\ + </style>""" + template_select = """\ + .dataframe %s { + %s: %s; + }""" + element_props = [('tbody tr th:only-of-type', + 'vertical-align', + 'middle'), + ('tbody tr th', + 'vertical-align', + 'top')] + if isinstance(self.columns, ABCMultiIndex): + element_props.append(('thead tr th', + 'text-align', + 'left')) + if self.show_row_idx_names: + element_props.append(('thead tr:last-of-type th', + 'text-align', + 'right')) + else: + element_props.append(('thead th', + 'text-align', + 'right')) + template_mid = '\n\n'.join(map(lambda t: template_select % t, + element_props)) + template = dedent('\n'.join((template_first, + template_mid, + template_last))) + self.write(template) + + def render(self): + self.write('<div>') + self.write_style() + super(NotebookFormatter, self).render() + self.write('</div>') + return self.elements
- [ n/a] follow-on #24637 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ n/a] whatsnew entry this refactor further limits responsibilities of functions/module and should allow a simpler implementation for fixes of #17004, #9690 etc.
https://api.github.com/repos/pandas-dev/pandas/pulls/24651
2019-01-06T11:25:07Z
2019-01-08T13:07:50Z
2019-01-08T13:07:50Z
2019-01-08T19:51:32Z
DOC: Fixing more doc warings and wrong .. code-block :: directive (space before colon)
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index d4a2945f8e3a5..3aa5a10807fd0 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -162,6 +162,14 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then # invgrep -R --include '*.py' -E '[[:space:]] pytest.raises' pandas/tests # RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Check for wrong space after code-block directive and before colon (".. code-block ::" instead of ".. code-block::")' ; echo $MSG + invgrep -R --include="*.rst" ".. code-block ::" doc/source + RET=$(($RET + $?)) ; echo $MSG "DONE" + + MSG='Check for wrong space after ipython directive and before colon (".. ipython ::" instead of ".. ipython::")' ; echo $MSG + invgrep -R --include="*.rst" ".. ipython ::" doc/source + RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Check that no file in the repo contains tailing whitespaces' ; echo $MSG set -o pipefail if [[ "$AZURE" == "true" ]]; then diff --git a/doc/source/api/arrays.rst b/doc/source/api/arrays.rst index d8ce2ab7bf73e..a727c3a2c292a 100644 --- a/doc/source/api/arrays.rst +++ b/doc/source/api/arrays.rst @@ -195,7 +195,7 @@ Methods A collection of timedeltas may be stored in a :class:`TimedeltaArray`. -.. autosumarry:: +.. autosummary:: :toctree: generated/ arrays.TimedeltaArray diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index 424ea7370849c..94bec5c5bc83d 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -115,7 +115,7 @@ Series is ndarray-like ``Series`` acts very similarly to a ``ndarray``, and is a valid argument to most NumPy functions. However, operations such as slicing will also slice the index. -.. ipython :: python +.. ipython:: python s[0] s[:3] @@ -171,7 +171,7 @@ Series is dict-like A Series is like a fixed-size dict in that you can get and set values by index label: -.. ipython :: python +.. ipython:: python s['a'] s['e'] = 12. 
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index add1a4e587240..3fe416c48f670 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -743,9 +743,9 @@ Selecting Random Samples A random selection of rows or columns from a Series, DataFrame, or Panel with the :meth:`~DataFrame.sample` method. The method will sample rows by default, and accepts a specific number of rows/columns to return, or a fraction of rows. -.. ipython :: python +.. ipython:: python - s = pd.Series([0,1,2,3,4,5]) + s = pd.Series([0, 1, 2, 3, 4, 5]) # When no arguments are passed, returns 1 row. s.sample() @@ -759,9 +759,9 @@ A random selection of rows or columns from a Series, DataFrame, or Panel with th By default, ``sample`` will return each row at most once, but one can also sample with replacement using the ``replace`` option: -.. ipython :: python +.. ipython:: python - s = pd.Series([0,1,2,3,4,5]) + s = pd.Series([0, 1, 2, 3, 4, 5]) # Without replacement (default): s.sample(n=6, replace=False) @@ -774,9 +774,9 @@ By default, each row has an equal probability of being selected, but if you want to have different probabilities, you can pass the ``sample`` function sampling weights as ``weights``. These weights can be a list, a NumPy array, or a Series, but they must be of the same length as the object you are sampling. Missing values will be treated as a weight of zero, and inf values are not allowed. If weights do not sum to 1, they will be re-normalized by dividing all weights by the sum of the weights. For example: -.. ipython :: python +.. ipython:: python - s = pd.Series([0,1,2,3,4,5]) + s = pd.Series([0, 1, 2, 3, 4, 5]) example_weights = [0, 0, 0.2, 0.2, 0.2, 0.4] s.sample(n=3, weights=example_weights) @@ -788,23 +788,24 @@ When applied to a DataFrame, you can use a column of the DataFrame as sampling w (provided you are sampling rows and not columns) by simply passing the name of the column as a string. -.. ipython :: python +.. 
ipython:: python - df2 = pd.DataFrame({'col1':[9,8,7,6], 'weight_column':[0.5, 0.4, 0.1, 0]}) - df2.sample(n = 3, weights = 'weight_column') + df2 = pd.DataFrame({'col1': [9, 8, 7, 6], + 'weight_column': [0.5, 0.4, 0.1, 0]}) + df2.sample(n=3, weights='weight_column') ``sample`` also allows users to sample columns instead of rows using the ``axis`` argument. -.. ipython :: python +.. ipython:: python - df3 = pd.DataFrame({'col1':[1,2,3], 'col2':[2,3,4]}) + df3 = pd.DataFrame({'col1': [1, 2, 3], 'col2': [2, 3, 4]}) df3.sample(n=1, axis=1) Finally, one can also set a seed for ``sample``'s random number generator using the ``random_state`` argument, which will accept either an integer (as a seed) or a NumPy RandomState object. -.. ipython :: python +.. ipython:: python - df4 = pd.DataFrame({'col1':[1,2,3], 'col2':[2,3,4]}) + df4 = pd.DataFrame({'col1': [1, 2, 3], 'col2': [2, 3, 4]}) # With a given seed, the sample will always draw the same rows. df4.sample(n=2, random_state=2) diff --git a/doc/source/io.rst b/doc/source/io.rst index 2149ee7fb46d9..dd1cde0bdff73 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -578,7 +578,7 @@ Duplicate names parsing If the file or header contains duplicate names, pandas will by default distinguish between them so as to prevent overwriting data: -.. ipython :: python +.. ipython:: python data = ('a,b,a\n' '0,1,2\n' @@ -590,7 +590,7 @@ which modifies a series of duplicate columns 'X', ..., 'X' to become 'X', 'X.1', ..., 'X.N'. If ``mangle_dupe_cols=False``, duplicate data can arise: -.. code-block :: python +.. code-block:: ipython In [2]: data = 'a,b,a\n0,1,2\n3,4,5' In [3]: pd.read_csv(StringIO(data), mangle_dupe_cols=False) @@ -602,7 +602,7 @@ arise: To prevent users from encountering this problem with duplicate data, a ``ValueError`` exception is raised if ``mangle_dupe_cols != True``: -.. code-block :: python +.. 
code-block:: ipython In [2]: data = 'a,b,a\n0,1,2\n3,4,5' In [3]: pd.read_csv(StringIO(data), mangle_dupe_cols=False) diff --git a/doc/source/whatsnew/v0.12.0.rst b/doc/source/whatsnew/v0.12.0.rst index 4413fc5cec2a9..b2dd8229c91f3 100644 --- a/doc/source/whatsnew/v0.12.0.rst +++ b/doc/source/whatsnew/v0.12.0.rst @@ -191,7 +191,7 @@ I/O Enhancements You can use ``pd.read_html()`` to read the output from ``DataFrame.to_html()`` like so - .. ipython :: python + .. ipython:: python :okwarning: df = pd.DataFrame({'a': range(3), 'b': list('abc')}) @@ -296,7 +296,7 @@ Other Enhancements For example you can do - .. ipython :: python + .. ipython:: python df = pd.DataFrame({'a': list('ab..'), 'b': [1, 2, 3, 4]}) df.replace(regex=r'\s*\.\s*', value=np.nan) @@ -306,7 +306,7 @@ Other Enhancements Regular string replacement still works as expected. For example, you can do - .. ipython :: python + .. ipython:: python df.replace('.', np.nan) diff --git a/doc/source/whatsnew/v0.15.0.rst b/doc/source/whatsnew/v0.15.0.rst index cd0a7b0e3c339..7b9a8ba082411 100644 --- a/doc/source/whatsnew/v0.15.0.rst +++ b/doc/source/whatsnew/v0.15.0.rst @@ -1015,7 +1015,7 @@ Other: .. ipython:: python - business_dates = date_range(start='4/1/2014', end='6/30/2014', freq='B') + business_dates = pd.date_range(start='4/1/2014', end='6/30/2014', freq='B') df = pd.DataFrame(1, index=business_dates, columns=['a', 'b']) # get the first, 4th, and last date index for each month df.groupby([df.index.year, df.index.month]).nth([0, 3, -1]) diff --git a/doc/source/whatsnew/v0.16.0.rst b/doc/source/whatsnew/v0.16.0.rst index 7ae17febe168d..f082bf656f23c 100644 --- a/doc/source/whatsnew/v0.16.0.rst +++ b/doc/source/whatsnew/v0.16.0.rst @@ -51,7 +51,7 @@ to be inserted (for example, a ``Series`` or NumPy array), or a function of one argument to be called on the ``DataFrame``. The new values are inserted, and the entire DataFrame (with all original and new columns) is returned. -.. ipython :: python +.. 
ipython:: python iris = pd.read_csv('data/iris.data') iris.head() @@ -61,10 +61,10 @@ and the entire DataFrame (with all original and new columns) is returned. Above was an example of inserting a precomputed value. We can also pass in a function to be evaluated. -.. ipython :: python +.. ipython:: python - iris.assign(sepal_ratio = lambda x: (x['SepalWidth'] / - x['SepalLength'])).head() + iris.assign(sepal_ratio=lambda x: (x['SepalWidth'] + / x['SepalLength'])).head() The power of ``assign`` comes when used in chains of operations. For example, we can limit the DataFrame to just those with a Sepal Length greater than 5, diff --git a/doc/source/whatsnew/v0.16.1.rst b/doc/source/whatsnew/v0.16.1.rst index cfd7218e11157..7621cb9c1e27c 100644 --- a/doc/source/whatsnew/v0.16.1.rst +++ b/doc/source/whatsnew/v0.16.1.rst @@ -181,9 +181,9 @@ total number or rows or columns. It also has options for sampling with or withou for passing in a column for weights for non-uniform sampling, and for setting seed values to facilitate replication. (:issue:`2419`) -.. ipython :: python +.. ipython:: python - example_series = Series([0,1,2,3,4,5]) + example_series = pd.Series([0, 1, 2, 3, 4, 5]) # When no arguments are passed, returns 1 example_series.sample() @@ -207,9 +207,10 @@ facilitate replication. (:issue:`2419`) When applied to a DataFrame, one may pass the name of a column to specify sampling weights when sampling from rows. -.. ipython :: python +.. ipython:: python - df = DataFrame({'col1':[9,8,7,6], 'weight_column':[0.5, 0.4, 0.1, 0]}) + df = pd.DataFrame({'col1': [9, 8, 7, 6], + 'weight_column': [0.5, 0.4, 0.1, 0]}) df.sample(n=3, weights='weight_column') diff --git a/doc/source/whatsnew/v0.17.0.rst b/doc/source/whatsnew/v0.17.0.rst index 4884d99d8fc91..6bde4f1b9cf99 100644 --- a/doc/source/whatsnew/v0.17.0.rst +++ b/doc/source/whatsnew/v0.17.0.rst @@ -84,9 +84,9 @@ The new implementation allows for having a single-timezone across all rows, with .. 
ipython:: python - df = DataFrame({'A': date_range('20130101', periods=3), - 'B': date_range('20130101', periods=3, tz='US/Eastern'), - 'C': date_range('20130101', periods=3, tz='CET')}) + df = pd.DataFrame({'A': pd.date_range('20130101', periods=3), + 'B': pd.date_range('20130101', periods=3, tz='US/Eastern'), + 'C': pd.date_range('20130101', periods=3, tz='CET')}) df df.dtypes @@ -442,17 +442,18 @@ Other enhancements - Added a ``DataFrame.round`` method to round the values to a variable number of decimal places (:issue:`10568`). - .. ipython :: python + .. ipython:: python - df = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'], - index=['first', 'second', 'third']) + df = pd.DataFrame(np.random.random([3, 3]), + columns=['A', 'B', 'C'], + index=['first', 'second', 'third']) df df.round(2) df.round({'A': 0, 'C': 2}) - ``drop_duplicates`` and ``duplicated`` now accept a ``keep`` keyword to target first, last, and all duplicates. The ``take_last`` keyword is deprecated, see :ref:`here <whatsnew_0170.deprecations>` (:issue:`6511`, :issue:`8505`) - .. ipython :: python + .. ipython:: python s = pd.Series(['A', 'B', 'C', 'A', 'B', 'D']) s.drop_duplicates() @@ -630,13 +631,13 @@ Of course you can coerce this as well. .. ipython:: python - to_datetime(['2009-07-31', 'asd'], errors='coerce') + pd.to_datetime(['2009-07-31', 'asd'], errors='coerce') To keep the previous behavior, you can use ``errors='ignore'``: .. ipython:: python - to_datetime(['2009-07-31', 'asd'], errors='ignore') + pd.to_datetime(['2009-07-31', 'asd'], errors='ignore') Furthermore, ``pd.to_timedelta`` has gained a similar API, of ``errors='raise'|'ignore'|'coerce'``, and the ``coerce`` keyword has been deprecated in favor of ``errors='coerce'``. @@ -655,13 +656,13 @@ Previous Behavior: .. code-block:: ipython - In [1]: Timestamp('2012Q2') + In [1]: pd.Timestamp('2012Q2') Traceback ... ValueError: Unable to parse 2012Q2 # Results in today's date. 
- In [2]: Timestamp('2014') + In [2]: pd.Timestamp('2014') Out [2]: 2014-08-12 00:00:00 v0.17.0 can parse them as below. It works on ``DatetimeIndex`` also. @@ -670,9 +671,9 @@ New Behavior: .. ipython:: python - Timestamp('2012Q2') - Timestamp('2014') - DatetimeIndex(['2012Q2', '2014']) + pd.Timestamp('2012Q2') + pd.Timestamp('2014') + pd.DatetimeIndex(['2012Q2', '2014']) .. note:: @@ -681,8 +682,8 @@ New Behavior: .. ipython:: python import pandas.tseries.offsets as offsets - Timestamp.now() - Timestamp.now() + offsets.DateOffset(years=1) + pd.Timestamp.now() + pd.Timestamp.now() + offsets.DateOffset(years=1) Changes to Index Comparisons ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -739,7 +740,7 @@ Boolean comparisons of a ``Series`` vs ``None`` will now be equivalent to compar .. ipython:: python - s = Series(range(3)) + s = pd.Series(range(3)) s.iloc[1] = None s @@ -807,11 +808,6 @@ Previous Behavior: New Behavior: -.. ipython:: python - :suppress: - - import os - .. ipython:: python df_with_missing.to_hdf('file.h5', @@ -824,6 +820,7 @@ New Behavior: .. ipython:: python :suppress: + import os os.remove('file.h5') See the :ref:`docs <io.hdf5>` for more details. @@ -876,7 +873,7 @@ Changes to ``Categorical.unique`` - unordered category: values and categories are sorted by appearance order. - ordered category: values are sorted by appearance order, categories keep existing order. -.. ipython :: python +.. ipython:: python cat = pd.Categorical(['C', 'A', 'B', 'C'], categories=['A', 'B', 'C'], @@ -899,7 +896,7 @@ an integer, resulting in ``header=0`` for ``False`` and ``header=1`` for ``True` A ``bool`` input to ``header`` will now raise a ``TypeError`` -.. code-block :: python +.. code-block:: ipython In [29]: df = pd.read_csv('data.csv', header=False) TypeError: Passing a bool to header is invalid. 
Use header=None for no header or @@ -984,10 +981,12 @@ Removal of prior version deprecations/changes - Removal of ``colSpace`` parameter from ``DataFrame.to_string()``, in favor of ``col_space``, circa 0.8.0 version. - Removal of automatic time-series broadcasting (:issue:`2304`) - .. ipython :: python + .. ipython:: python np.random.seed(1234) - df = DataFrame(np.random.randn(5,2),columns=list('AB'),index=date_range('20130101',periods=5)) + df = DataFrame(np.random.randn(5, 2), + columns=list('AB'), + index=date_range('20130101', periods=5)) df Previously @@ -1008,9 +1007,9 @@ Removal of prior version deprecations/changes Current - .. ipython :: python + .. ipython:: python - df.add(df.A,axis='index') + df.add(df.A, axis='index') - Remove ``table`` keyword in ``HDFStore.put/append``, in favor of using ``format=`` (:issue:`4645`) diff --git a/doc/source/whatsnew/v0.17.1.rst b/doc/source/whatsnew/v0.17.1.rst index ddde96c9f598d..233414dae957d 100644 --- a/doc/source/whatsnew/v0.17.1.rst +++ b/doc/source/whatsnew/v0.17.1.rst @@ -56,7 +56,7 @@ Here's a quick example: .. ipython:: python np.random.seed(123) - df = DataFrame(np.random.randn(10, 5), columns=list('abcde')) + df = pd.DataFrame(np.random.randn(10, 5), columns=list('abcde')) html = df.style.background_gradient(cmap='viridis', low=.5) We can render the HTML to get the following table. @@ -84,7 +84,7 @@ Enhancements .. ipython:: python - df = DataFrame({'A': ['foo'] * 1000}) # noqa: F821 + df = pd.DataFrame({'A': ['foo'] * 1000}) # noqa: F821 df['B'] = df['A'].astype('category') # shows the '+' as we have object dtypes diff --git a/doc/source/whatsnew/v0.18.0.rst b/doc/source/whatsnew/v0.18.0.rst index e9d4891df70c5..9ff6ad7188f5a 100644 --- a/doc/source/whatsnew/v0.18.0.rst +++ b/doc/source/whatsnew/v0.18.0.rst @@ -324,7 +324,7 @@ Timedeltas .. 
ipython:: python - t = timedelta_range('1 days 2 hr 13 min 45 us', periods=3, freq='d') + t = pd.timedelta_range('1 days 2 hr 13 min 45 us', periods=3, freq='d') t t.round('10min') @@ -810,8 +810,8 @@ performed with the ``Resampler`` objects with :meth:`~Resampler.backfill`, .. ipython:: python - s = pd.Series(np.arange(5,dtype='int64'), - index=date_range('2010-01-01', periods=5, freq='Q')) + s = pd.Series(np.arange(5, dtype='int64'), + index=pd.date_range('2010-01-01', periods=5, freq='Q')) s Previously diff --git a/doc/source/whatsnew/v0.19.0.rst b/doc/source/whatsnew/v0.19.0.rst index 38208e9ff4cba..00d0d202d56cc 100644 --- a/doc/source/whatsnew/v0.19.0.rst +++ b/doc/source/whatsnew/v0.19.0.rst @@ -1160,7 +1160,7 @@ from ``n`` for the second, and so on, so that, when concatenated, they are ident the result of calling :func:`read_csv` without the ``chunksize=`` argument (:issue:`12185`). -.. ipython :: python +.. ipython:: python data = 'A,B\n0,1\n2,3\n4,5\n6,7' @@ -1178,7 +1178,7 @@ the result of calling :func:`read_csv` without the ``chunksize=`` argument **New behavior**: -.. ipython :: python +.. ipython:: python pd.concat(pd.read_csv(StringIO(data), chunksize=2))
Using `.. code-block ::` with a space before the colon wasn't making the blocks be validated for flake8 issues. Same for `ipython` directive. Making sure the space is not present, and fixing flake8 errors.
https://api.github.com/repos/pandas-dev/pandas/pulls/24650
2019-01-06T01:30:26Z
2019-01-06T16:03:49Z
2019-01-06T16:03:48Z
2019-01-07T14:39:46Z
DOC: whatsnew & linked edits
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index 9981310b4a6fb..68f17a68784c9 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -921,7 +921,7 @@ If you need integer based selection, you should use ``iloc``: dfir.iloc[0:5] -.. _advanced.intervallindex: +.. _advanced.intervalindex: IntervalIndex ~~~~~~~~~~~~~ diff --git a/doc/source/api/arrays.rst b/doc/source/api/arrays.rst index d8ce2ab7bf73e..d8724e55980b9 100644 --- a/doc/source/api/arrays.rst +++ b/doc/source/api/arrays.rst @@ -330,13 +330,13 @@ a :class:`pandas.api.types.CategoricalDtype`. :toctree: generated/ :template: autosummary/class_without_autosummary.rst - api.types.CategoricalDtype + CategoricalDtype .. autosummary:: :toctree: generated/ - api.types.CategoricalDtype.categories - api.types.CategoricalDtype.ordered + CategoricalDtype.categories + CategoricalDtype.ordered Categorical data can be stored in a :class:`pandas.Categorical` diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 13681485d2f69..7c06288c01221 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -64,7 +64,7 @@ NumPy's type system to add support for custom arrays (see :ref:`basics.dtypes`). To get the actual data inside a :class:`Index` or :class:`Series`, use -the **array** property +the ``.array`` property .. ipython:: python @@ -72,11 +72,11 @@ the **array** property s.index.array :attr:`~Series.array` will always be an :class:`~pandas.api.extensions.ExtensionArray`. -The exact details of what an ``ExtensionArray`` is and why pandas uses them is a bit +The exact details of what an :class:`~pandas.api.extensions.ExtensionArray` is and why pandas uses them is a bit beyond the scope of this introduction. See :ref:`basics.dtypes` for more. If you know you need a NumPy array, use :meth:`~Series.to_numpy` -or :meth:`numpy.asarray`. +or :meth:`numpy.ndarray.asarray`. .. ipython:: python @@ -84,17 +84,17 @@ or :meth:`numpy.asarray`. 
np.asarray(s) When the Series or Index is backed by -an :class:`~pandas.api.extension.ExtensionArray`, :meth:`~Series.to_numpy` +an :class:`~pandas.api.extensions.ExtensionArray`, :meth:`~Series.to_numpy` may involve copying data and coercing values. See :ref:`basics.dtypes` for more. :meth:`~Series.to_numpy` gives some control over the ``dtype`` of the -resulting :class:`ndarray`. For example, consider datetimes with timezones. +resulting :class:`numpy.ndarray`. For example, consider datetimes with timezones. NumPy doesn't have a dtype to represent timezone-aware datetimes, so there are two possibly useful representations: -1. An object-dtype :class:`ndarray` with :class:`Timestamp` objects, each +1. An object-dtype :class:`numpy.ndarray` with :class:`Timestamp` objects, each with the correct ``tz`` -2. A ``datetime64[ns]`` -dtype :class:`ndarray`, where the values have +2. A ``datetime64[ns]`` -dtype :class:`numpy.ndarray`, where the values have been converted to UTC and the timezone discarded Timezones may be preserved with ``dtype=object`` @@ -106,6 +106,8 @@ Timezones may be preserved with ``dtype=object`` Or thrown away with ``dtype='datetime64[ns]'`` +.. ipython:: python + ser.to_numpy(dtype="datetime64[ns]") Getting the "raw data" inside a :class:`DataFrame` is possibly a bit more @@ -137,7 +139,7 @@ drawbacks: 1. When your Series contains an :ref:`extension type <extending.extension-types>`, it's unclear whether :attr:`Series.values` returns a NumPy array or the extension array. - :attr:`Series.array` will always return an ``ExtensionArray``, and will never + :attr:`Series.array` will always return an :class:`~pandas.api.extensions.ExtensionArray`, and will never copy data. :meth:`Series.to_numpy` will always return a NumPy array, potentially at the cost of copying / coercing values. 2. 
When your DataFrame contains a mixture of data types, :attr:`DataFrame.values` may diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 7fa386935e3f4..46a6a6da9da3a 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -164,6 +164,9 @@ See :ref:`integer_na` for more. .. _whatsnew_0240.enhancements.array: +Array +^^^^^ + A new top-level method :func:`array` has been added for creating 1-dimensional arrays (:issue:`22860`). This can be used to create any :ref:`extension array <extending.extension-types>`, including extension arrays registered by :ref:`3rd party libraries <ecosystem.extensions>`. See @@ -579,6 +582,41 @@ You must pass in the ``line_terminator`` explicitly, even in this case. ...: print(f.read()) Out[3]: b'string_with_lf,string_with_crlf\r\n"a\nbc","a\r\nbc"\r\n' +.. _whatsnew_0240.bug_fixes.nan_with_str_dtype: + +Proper handling of `np.NaN` in a string data-typed column with the Python engine +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +There was bug in :func:`read_excel` and :func:`read_csv` with the Python +engine, where missing values turned to ``'nan'`` with ``dtype=str`` and +``na_filter=True``. Now, these missing values are converted to the string +missing indicator, ``np.nan``. (:issue:`20377`) + +.. ipython:: python + :suppress: + + from pandas.compat import StringIO + +*Previous Behavior*: + +.. code-block:: ipython + + In [5]: data = 'a,b,c\n1,,3\n4,5,6' + In [6]: df = pd.read_csv(StringIO(data), engine='python', dtype=str, na_filter=True) + In [7]: df.loc[0, 'b'] + Out[7]: + 'nan' + +*New Behavior*: + +.. ipython:: python + + data = 'a,b,c\n1,,3\n4,5,6' + df = pd.read_csv(StringIO(data), engine='python', dtype=str, na_filter=True) + df.loc[0, 'b'] + +Notice how we now instead output ``np.nan`` itself instead of a stringified form of it. + .. 
_whatsnew_0240.api.timezone_offset_parsing: Parsing Datetime Strings with Timezone Offsets @@ -677,6 +715,9 @@ is the case with :attr:`Period.end_time`, for example .. _whatsnew_0240.api_breaking.datetime_unique: +Datetime w/tz and unique +^^^^^^^^^^^^^^^^^^^^^^^^ + The return type of :meth:`Series.unique` for datetime with timezone values has changed from an :class:`numpy.ndarray` of :class:`Timestamp` objects to a :class:`arrays.DatetimeArray` (:issue:`24024`). @@ -852,12 +893,6 @@ Period Subtraction Subtraction of a ``Period`` from another ``Period`` will give a ``DateOffset``. instead of an integer (:issue:`21314`) -.. ipython:: python - - june = pd.Period('June 2018') - april = pd.Period('April 2018') - june - april - *Previous Behavior*: .. code-block:: ipython @@ -869,13 +904,16 @@ instead of an integer (:issue:`21314`) In [4]: june - april Out [4]: 2 -Similarly, subtraction of a ``Period`` from a ``PeriodIndex`` will now return -an ``Index`` of ``DateOffset`` objects instead of an ``Int64Index`` +*New Behavior*: .. ipython:: python - pi = pd.period_range('June 2018', freq='M', periods=3) - pi - pi[0] + june = pd.Period('June 2018') + april = pd.Period('April 2018') + june - april + +Similarly, subtraction of a ``Period`` from a ``PeriodIndex`` will now return +an ``Index`` of ``DateOffset`` objects instead of an ``Int64Index`` *Previous Behavior*: @@ -886,6 +924,13 @@ an ``Index`` of ``DateOffset`` objects instead of an ``Int64Index`` In [3]: pi - pi[0] Out[3]: Int64Index([0, 1, 2], dtype='int64') +*New Behavior*: + +.. ipython:: python + + pi = pd.period_range('June 2018', freq='M', periods=3) + pi - pi[0] + .. _whatsnew_0240.api.timedelta64_subtract_nan: @@ -902,12 +947,6 @@ all-``NaT``. This is for compatibility with ``TimedeltaIndex`` and df = pd.DataFrame([pd.Timedelta(days=1)]) df -.. code-block:: ipython - - In [2]: df - np.nan - ... - TypeError: unsupported operand type(s) for -: 'TimedeltaIndex' and 'float' - *Previous Behavior*: .. 
code-block:: ipython @@ -919,6 +958,14 @@ all-``NaT``. This is for compatibility with ``TimedeltaIndex`` and 0 0 NaT +*New Behavior*: + +.. code-block:: ipython + + In [2]: df - np.nan + ... + TypeError: unsupported operand type(s) for -: 'TimedeltaIndex' and 'float' + .. _whatsnew_0240.api.dataframe_cmp_broadcasting: DataFrame Comparison Operations Broadcasting Changes @@ -935,13 +982,16 @@ The affected cases are: - a list or tuple with length matching the number of rows in the :class:`DataFrame` will now raise ``ValueError`` instead of operating column-by-column (:issue:`22880`. - a list or tuple with length matching the number of columns in the :class:`DataFrame` will now operate row-by-row instead of raising ``ValueError`` (:issue:`22880`). +.. ipython:: python + + arr = np.arange(6).reshape(3, 2) + df = pd.DataFrame(arr) + df + *Previous Behavior*: .. code-block:: ipython - In [3]: arr = np.arange(6).reshape(3, 2) - In [4]: df = pd.DataFrame(arr) - In [5]: df == arr[[0], :] ...: # comparison previously broadcast where arithmetic would raise Out[5]: @@ -979,13 +1029,6 @@ The affected cases are: *New Behavior*: -.. ipython:: python - :okexcept: - - arr = np.arange(6).reshape(3, 2) - df = pd.DataFrame(arr) - df - .. ipython:: python # Comparison operations and arithmetic operations both broadcast. @@ -1018,12 +1061,16 @@ DataFrame Arithmetic Operations Broadcasting Changes ``np.ndarray`` objects now broadcast in the same way as ``np.ndarray`` broadcast. (:issue:`23000`) +.. ipython:: python + + arr = np.arange(6).reshape(3, 2) + df = pd.DataFrame(arr) + df + *Previous Behavior*: .. code-block:: ipython - In [3]: arr = np.arange(6).reshape(3, 2) - In [4]: df = pd.DataFrame(arr) In [5]: df + arr[[0], :] # 1 row, 2 columns ... ValueError: Unable to coerce to DataFrame, shape must be (3, 2): given (1, 2) @@ -1033,12 +1080,6 @@ broadcast. (:issue:`23000`) *New Behavior*: -.. ipython:: python - - arr = np.arange(6).reshape(3, 2) - df = pd.DataFrame(arr) - df - .. 
ipython:: python df + arr[[0], :] # 1 row, 2 columns @@ -1050,41 +1091,50 @@ broadcast. (:issue:`23000`) ExtensionType Changes ^^^^^^^^^^^^^^^^^^^^^ -:class:`pandas.api.extensions.ExtensionDtype` **Equality and Hashability** + **Equality and Hashability** Pandas now requires that extension dtypes be hashable. The base class implements a default ``__eq__`` and ``__hash__``. If you have a parametrized dtype, you should update the ``ExtensionDtype._metadata`` tuple to match the signature of your ``__init__`` method. See :class:`pandas.api.extensions.ExtensionDtype` for more (:issue:`22476`). -**Other changes** +**Reshaping changes** - :meth:`~pandas.api.types.ExtensionArray.dropna` has been added (:issue:`21185`) - :meth:`~pandas.api.types.ExtensionArray.repeat` has been added (:issue:`24349`) +- The ``ExtensionArray`` constructor, ``_from_sequence`` now take the keyword arg ``copy=False`` (:issue:`21185`) +- :meth:`pandas.api.extensions.ExtensionArray.shift` added as part of the basic ``ExtensionArray`` interface (:issue:`22387`). +- :meth:`~pandas.api.types.ExtensionArray.searchsorted` has been added (:issue:`24350`) +- Support for reduction operations such as ``sum``, ``mean`` via opt-in base class method override (:issue:`22762`) +- :func:`ExtensionArray.isna` is allowed to return an ``ExtensionArray`` (:issue:`22325`). + +**Dtype changes** + - ``ExtensionDtype`` has gained the ability to instantiate from string dtypes, e.g. ``decimal`` would instantiate a registered ``DecimalDtype``; furthermore the ``ExtensionDtype`` has gained the method ``construct_array_type`` (:issue:`21185`) -- :meth:`~pandas.api.types.ExtensionArray.searchsorted` has been added (:issue:`24350`) -- An ``ExtensionArray`` with a boolean dtype now works correctly as a boolean indexer. 
:meth:`pandas.api.types.is_bool_dtype` now properly considers them boolean (:issue:`22326`) - Added ``ExtensionDtype._is_numeric`` for controlling whether an extension dtype is considered numeric (:issue:`22290`). -- The ``ExtensionArray`` constructor, ``_from_sequence`` now take the keyword arg ``copy=False`` (:issue:`21185`) +- Added :meth:`pandas.api.types.register_extension_dtype` to register an extension type with pandas (:issue:`22664`) +- Updated the ``.type`` attribute for ``PeriodDtype``, ``DatetimeTZDtype``, and ``IntervalDtype`` to be instances of the dtype (``Period``, ``Timestamp``, and ``Interval`` respectively) (:issue:`22938`) + +**Other changes** + +- A default repr for :class:`pandas.api.extensions.ExtensionArray` is now provided (:issue:`23601`). +- An ``ExtensionArray`` with a boolean dtype now works correctly as a boolean indexer. :meth:`pandas.api.types.is_bool_dtype` now properly considers them boolean (:issue:`22326`) + +**Bug Fixes** + - Bug in :meth:`Series.get` for ``Series`` using ``ExtensionArray`` and integer index (:issue:`21257`) -- :meth:`pandas.api.extensions.ExtensionArray.shift` added as part of the basic ``ExtensionArray`` interface (:issue:`22387`). - :meth:`~Series.shift` now dispatches to :meth:`ExtensionArray.shift` (:issue:`22386`) - :meth:`Series.combine()` works correctly with :class:`~pandas.api.extensions.ExtensionArray` inside of :class:`Series` (:issue:`20825`) - :meth:`Series.combine()` with scalar argument now works for any function type (:issue:`21248`) - :meth:`Series.astype` and :meth:`DataFrame.astype` now dispatch to :meth:`ExtensionArray.astype` (:issue:`21185:`). 
- Slicing a single row of a ``DataFrame`` with multiple ExtensionArrays of the same type now preserves the dtype, rather than coercing to object (:issue:`22784`) -- Added :meth:`pandas.api.types.register_extension_dtype` to register an extension type with pandas (:issue:`22664`) - Bug when concatenating multiple ``Series`` with different extension dtypes not casting to object dtype (:issue:`22994`) - Series backed by an ``ExtensionArray`` now work with :func:`util.hash_pandas_object` (:issue:`23066`) -- Updated the ``.type`` attribute for ``PeriodDtype``, ``DatetimeTZDtype``, and ``IntervalDtype`` to be instances of the dtype (``Period``, ``Timestamp``, and ``Interval`` respectively) (:issue:`22938`) -- :func:`ExtensionArray.isna` is allowed to return an ``ExtensionArray`` (:issue:`22325`). -- Support for reduction operations such as ``sum``, ``mean`` via opt-in base class method override (:issue:`22762`) - :meth:`DataFrame.stack` no longer converts to object dtype for DataFrames where each column has the same extension dtype. The output Series will have the same dtype as the columns (:issue:`23077`). - :meth:`Series.unstack` and :meth:`DataFrame.unstack` no longer convert extension arrays to object-dtype ndarrays. Each column in the output ``DataFrame`` will now have the same dtype as the input (:issue:`23077`). - Bug when grouping :meth:`Dataframe.groupby()` and aggregating on ``ExtensionArray`` it was not returning the actual ``ExtensionArray`` dtype (:issue:`23227`). - Bug in :func:`pandas.merge` when merging on an extension array-backed column (:issue:`23020`). -- A default repr for :class:`pandas.api.extensions.ExtensionArray` is now provided (:issue:`23601`). .. 
_whatsnew_0240.api.incompatibilities: @@ -1184,19 +1234,18 @@ Datetimelike API Changes - :class:`PeriodIndex` subtraction of another ``PeriodIndex`` will now return an object-dtype :class:`Index` of :class:`DateOffset` objects instead of raising a ``TypeError`` (:issue:`20049`) - :func:`cut` and :func:`qcut` now returns a :class:`DatetimeIndex` or :class:`TimedeltaIndex` bins when the input is datetime or timedelta dtype respectively and ``retbins=True`` (:issue:`19891`) - :meth:`DatetimeIndex.to_period` and :meth:`Timestamp.to_period` will issue a warning when timezone information will be lost (:issue:`21333`) +- :class:`DatetimeIndex` now accepts :class:`Int64Index` arguments as epoch timestamps (:issue:`20997`) +- :meth:`PeriodIndex.tz_convert` and :meth:`PeriodIndex.tz_localize` have been removed (:issue:`21781`) .. _whatsnew_0240.api.other: Other API Changes ^^^^^^^^^^^^^^^^^ -- :class:`DatetimeIndex` now accepts :class:`Int64Index` arguments as epoch timestamps (:issue:`20997`) - Accessing a level of a ``MultiIndex`` with a duplicate name (e.g. in - :meth:`~MultiIndex.get_level_values`) now raises a ``ValueError`` instead of - a ``KeyError`` (:issue:`21678`). + :meth:`~MultiIndex.get_level_values`) now raises a ``ValueError`` instead of a ``KeyError`` (:issue:`21678`). 
- Invalid construction of ``IntervalDtype`` will now always raise a ``TypeError`` rather than a ``ValueError`` if the subdtype is invalid (:issue:`21185`) - Trying to reindex a ``DataFrame`` with a non unique ``MultiIndex`` now raises a ``ValueError`` instead of an ``Exception`` (:issue:`21770`) -- :meth:`PeriodIndex.tz_convert` and :meth:`PeriodIndex.tz_localize` have been removed (:issue:`21781`) - :class:`Index` subtraction will attempt to operate element-wise instead of raising ``TypeError`` (:issue:`19369`) - :class:`pandas.io.formats.style.Styler` supports a ``number-format`` property when using :meth:`~pandas.io.formats.style.Styler.to_excel` (:issue:`22015`) - :meth:`DataFrame.corr` and :meth:`Series.corr` now raise a ``ValueError`` along with a helpful error message instead of a ``KeyError`` when supplied with an invalid method (:issue:`22298`) @@ -1432,13 +1481,6 @@ Performance Improvements - Improved performance of :class:`Period` constructor, additionally benefitting ``PeriodArray`` and ``PeriodIndex`` creation (:issue:`24084` and :issue:`24118`) - Improved performance of tz-aware :class:`DatetimeArray` binary operations (:issue:`24491`) -.. _whatsnew_0240.docs: - -Documentation Changes -~~~~~~~~~~~~~~~~~~~~~ - -- - .. _whatsnew_0240.bug_fixes: Bug Fixes @@ -1658,44 +1700,6 @@ MultiIndex I/O ^^^ -- Bug where integer categorical data would be formatted as floats if ``NaN`` values were present (:issue:`19214`) - - -.. _whatsnew_0240.bug_fixes.nan_with_str_dtype: - -Proper handling of `np.NaN` in a string data-typed column with the Python engine -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There was bug in :func:`read_excel` and :func:`read_csv` with the Python -engine, where missing values turned to ``'nan'`` with ``dtype=str`` and -``na_filter=True``. Now, these missing values are converted to the string -missing indicator, ``np.nan``. (:issue:`20377`) - -.. 
ipython:: python - :suppress: - - from pandas.compat import StringIO - -*Previous Behavior*: - -.. code-block:: ipython - - In [5]: data = 'a,b,c\n1,,3\n4,5,6' - In [6]: df = pd.read_csv(StringIO(data), engine='python', dtype=str, na_filter=True) - In [7]: df.loc[0, 'b'] - Out[7]: - 'nan' - -*New Behavior*: - -.. ipython:: python - - data = 'a,b,c\n1,,3\n4,5,6' - df = pd.read_csv(StringIO(data), engine='python', dtype=str, na_filter=True) - df.loc[0, 'b'] - -Notice how we now instead output ``np.nan`` itself instead of a stringified form of it. - - Bug in :func:`read_csv` in which a column specified with ``CategoricalDtype`` of boolean categories was not being correctly coerced from string values to booleans (:issue:`20498`) - Bug in :meth:`DataFrame.to_sql` when writing timezone aware data (``datetime64[ns, tz]`` dtype) would raise a ``TypeError`` (:issue:`9086`) - Bug in :meth:`DataFrame.to_sql` where a naive :class:`DatetimeIndex` would be written as ``TIMESTAMP WITH TIMEZONE`` type in supported databases, e.g. PostgreSQL (:issue:`23510`) @@ -1711,11 +1715,11 @@ Notice how we now instead output ``np.nan`` itself instead of a stringified form - :func:`read_sas()` will correctly parse sas7bdat files with data page types having also bit 7 set (so page type is 128 + 256 = 384) (:issue:`16615`) - Bug in :func:`read_sas()` in which an incorrect error was raised on an invalid file format. (:issue:`24548`) - Bug in :meth:`detect_client_encoding` where potential ``IOError`` goes unhandled when importing in a mod_wsgi process due to restricted access to stdout. (:issue:`21552`) -- Bug in :func:`to_html()` with ``index=False`` misses truncation indicators (...) 
on truncated DataFrame (:issue:`15019`, :issue:`22783`) -- Bug in :func:`to_html()` with ``index=False`` when both columns and row index are ``MultiIndex`` (:issue:`22579`) -- Bug in :func:`to_html()` with ``index_names=False`` displaying index name (:issue:`22747`) -- Bug in :func:`to_html()` with ``header=False`` not displaying row index names (:issue:`23788`) -- Bug in :func:`to_html()` with ``sparsify=False`` that caused it to raise ``TypeError`` (:issue:`22887`) +- Bug in :func:`DataFrame.to_html()` with ``index=False`` misses truncation indicators (...) on truncated DataFrame (:issue:`15019`, :issue:`22783`) +- Bug in :func:`DataFrame.to_html()` with ``index=False`` when both columns and row index are ``MultiIndex`` (:issue:`22579`) +- Bug in :func:`DataFrame.to_html()` with ``index_names=False`` displaying index name (:issue:`22747`) +- Bug in :func:`DataFrame.to_html()` with ``header=False`` not displaying row index names (:issue:`23788`) +- Bug in :func:`DataFrame.to_html()` with ``sparsify=False`` that caused it to raise ``TypeError`` (:issue:`22887`) - Bug in :func:`DataFrame.to_string()` that broke column alignment when ``index=False`` and width of first column's values is greater than the width of first column's header (:issue:`16839`, :issue:`13032`) - Bug in :func:`DataFrame.to_string()` that caused representations of :class:`DataFrame` to not take up the whole window (:issue:`22984`) - Bug in :func:`DataFrame.to_csv` where a single level MultiIndex incorrectly wrote a tuple. Now just the value of the index is written (:issue:`19589`). @@ -1838,7 +1842,6 @@ Other ^^^^^ - Bug where C variables were declared with external linkage causing import errors if certain other C libraries were imported before Pandas. (:issue:`24113`) -- Require at least 0.28.2 version of ``cython`` to support read-only memoryviews (:issue:`21688`) .. _whatsnew_0.24.0.contributors:
https://api.github.com/repos/pandas-dev/pandas/pulls/24649
2019-01-05T21:53:34Z
2019-01-06T15:52:32Z
2019-01-06T15:52:32Z
2019-01-06T15:52:32Z
CI/TST: Check that unittest.mock is not being used in testing
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index da1b035cf3ed2..d4a2945f8e3a5 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -148,6 +148,11 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then invgrep -R --exclude=*.pyc --exclude=testing.py --exclude=test_util.py assert_raises_regex pandas RET=$(($RET + $?)) ; echo $MSG "DONE" + # Check for the following code in testing: `unittest.mock`, `mock.Mock()` or `mock.patch` + MSG='Check that unittest.mock is not used (pytest builtin monkeypatch fixture should be used instread)' ; echo $MSG + invgrep -r -E --include '*.py' '(unittest(\.| import )mock|mock\.Mock\(\)|mock\.patch)' pandas/tests/ + RET=$(($RET + $?)) ; echo $MSG "DONE" + # Check that we use pytest.raises only as a context manager # # For any flake8-compliant code, the only way this regex gets
xref https://github.com/pandas-dev/pandas/pull/24624#issuecomment-451658866
https://api.github.com/repos/pandas-dev/pandas/pulls/24648
2019-01-05T21:52:31Z
2019-01-05T22:47:22Z
2019-01-05T22:47:22Z
2020-09-17T00:59:13Z
PERF: 10x speedup in Series/DataFrame construction for lists of ints
diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py index 7c78fe7e7a177..9082b4186bfa4 100644 --- a/asv_bench/benchmarks/ctors.py +++ b/asv_bench/benchmarks/ctors.py @@ -41,7 +41,7 @@ def list_of_lists_with_none(arr): class SeriesConstructors(object): - param_names = ["data_fmt", "with_index"] + param_names = ["data_fmt", "with_index", "dtype"] params = [[no_change, list, list_of_str, @@ -52,15 +52,19 @@ class SeriesConstructors(object): list_of_lists, list_of_tuples_with_none, list_of_lists_with_none], - [False, True]] + [False, True], + ['float', 'int']] - def setup(self, data_fmt, with_index): + def setup(self, data_fmt, with_index, dtype): N = 10**4 - arr = np.random.randn(N) + if dtype == 'float': + arr = np.random.randn(N) + else: + arr = np.arange(N) self.data = data_fmt(arr) self.index = np.arange(N) if with_index else None - def time_series_constructor(self, data_fmt, with_index): + def time_series_constructor(self, data_fmt, with_index, dtype): Series(self.data, index=self.index) diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 6e6d35f00725c..85eb6c3421222 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2011,7 +2011,8 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, floats[i] = <float64_t>val complexes[i] = <double complex>val if not seen.null_: - seen.saw_int(int(val)) + val = int(val) + seen.saw_int(val) if ((seen.uint_ and seen.sint_) or val > oUINT64_MAX or val < oINT64_MIN):
This PR is a minor tweak to the `int64`/`uint64` overflow fix added in https://github.com/pandas-dev/pandas/pull/18624 Simply casting to an `int` after doing a typecheck is sufficient for the compiler to generate a 10x speedup: ``` $ asv compare upstream/master HEAD --sort ratio -s Benchmarks that have improved: before after ratio [f074abef] [80641ddf] <series_list_int_speedup~1> <series_list_int_speedup> failed 7.39±0s n/a strings.Dummies.time_get_dummies - 61.7±3ms 11.7±0.5ms 0.19 ctors.SeriesConstructors.time_series_constructor(<function arr_dict>, True, 'int') - 63.0±2ms 11.1±0.3ms 0.18 ctors.SeriesConstructors.time_series_constructor(<function arr_dict>, False, 'int') - 55.8±2ms 5.37±0.2ms 0.10 ctors.SeriesConstructors.time_series_constructor(<class 'list'>, True, 'int') - 55.3±5ms 4.84±0.2ms 0.09 ctors.SeriesConstructors.time_series_constructor(<class 'list'>, False, 'int') ``` This is how `maybe_convert_numeric()` already handles `int`s, so this just brings `maybe_convert_object()` back into alignment. I believe this would yield a similar speedup for `DataFrame`s but we don't have any benchmarks explicitly testing as such. However, the `get_dummies()` benchmark involves expanding to a `DataFrame` and gets a speedup of similar magnitude (not visible as it previously would time out after 30s). - [ ] closes #xxxx - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24647
2019-01-05T21:31:56Z
2019-01-06T15:48:43Z
2019-01-06T15:48:42Z
2019-01-06T15:48:43Z
Repr for Integer and Pandas Dtypes
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index f8f87ff1c96f1..b3dde6bf2bd93 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -36,6 +36,11 @@ class _IntegerDtype(ExtensionDtype): type = None na_value = np.nan + def __repr__(self): + sign = 'U' if self.is_unsigned_integer else '' + return "{sign}Int{size}Dtype()".format(sign=sign, + size=8 * self.itemsize) + @cache_readonly def is_signed_integer(self): return self.kind == 'i' diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index b1dc77e65eee8..47517782e2bbf 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -38,8 +38,12 @@ def __init__(self, dtype): self._name = dtype.name self._type = dtype.type + def __repr__(self): + return "PandasDtype({!r})".format(self.name) + @property def numpy_dtype(self): + """The NumPy dtype this PandasDtype wraps.""" return self._dtype @property @@ -72,6 +76,7 @@ def kind(self): @property def itemsize(self): + """The element size of this data-type object.""" return self._dtype.itemsize diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py index 173f9707e76c2..09298bb5cd08d 100644 --- a/pandas/tests/arrays/test_integer.py +++ b/pandas/tests/arrays/test_integer.py @@ -57,6 +57,20 @@ def test_dtypes(dtype): assert dtype.name is not None +@pytest.mark.parametrize('dtype, expected', [ + (Int8Dtype(), 'Int8Dtype()'), + (Int16Dtype(), 'Int16Dtype()'), + (Int32Dtype(), 'Int32Dtype()'), + (Int64Dtype(), 'Int64Dtype()'), + (UInt8Dtype(), 'UInt8Dtype()'), + (UInt16Dtype(), 'UInt16Dtype()'), + (UInt32Dtype(), 'UInt32Dtype()'), + (UInt64Dtype(), 'UInt64Dtype()'), +]) +def test_repr_dtype(dtype, expected): + assert repr(dtype) == expected + + def test_repr_array(): result = repr(integer_array([1, None, 3])) expected = ( diff --git a/pandas/tests/arrays/test_numpy.py b/pandas/tests/arrays/test_numpy.py index b17e509c24e71..a77f1f8a7b3d1 100644 --- 
a/pandas/tests/arrays/test_numpy.py +++ b/pandas/tests/arrays/test_numpy.py @@ -71,6 +71,17 @@ def test_is_boolean(dtype, expected): assert dtype._is_boolean is expected +def test_repr(): + dtype = PandasDtype(np.dtype("int64")) + assert repr(dtype) == "PandasDtype('int64')" + + +def test_constructor_from_string(): + result = PandasDtype.construct_from_string("int64") + expected = PandasDtype(np.dtype("int64")) + assert result == expected + + # ---------------------------------------------------------------------------- # Construction
```python >>> pd.PandasDtype("int64") PandasDtype('int64') >>> pd.Int32Dtype() Int32Dtype() >>> pd.UInt8Dtype() UInt8Dtype() ```
https://api.github.com/repos/pandas-dev/pandas/pulls/24646
2019-01-05T20:17:41Z
2019-01-05T21:07:35Z
2019-01-05T21:07:35Z
2019-01-05T21:07:39Z
WIP: NaTD
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 037e7de27adc3..2faef4128050d 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1,4 +1,6 @@ # -*- coding: utf-8 -*- +from __future__ import division + import collections import textwrap import warnings @@ -36,6 +38,7 @@ from pandas._libs.tslibs.nattype import nat_strings from pandas._libs.tslibs.nattype cimport ( checknull_with_nat, NPY_NAT, c_NaT as NaT) from pandas._libs.tslibs.offsets cimport to_offset +from pandas._libs.tslibs.offsets import _Tick # ---------------------------------------------------------------------- # Constants @@ -1323,7 +1326,7 @@ class Timedelta(_Timedelta): # integers or floats return Timedelta(self.value / other, unit='ns') - elif not _validate_ops_compat(other): + elif not _validate_ops_compat(other) or other is NaTD: return NotImplemented other = Timedelta(other) @@ -1346,7 +1349,7 @@ class Timedelta(_Timedelta): elif hasattr(other, 'dtype'): return other / self.to_timedelta64() - elif not _validate_ops_compat(other): + elif not _validate_ops_compat(other) or other is NaTD: return NotImplemented other = Timedelta(other) @@ -1522,3 +1525,278 @@ cdef _broadcast_floordiv_td64(int64_t value, object other, # resolution in ns Timedelta.min = Timedelta(np.iinfo(np.int64).min + 1) Timedelta.max = Timedelta(np.iinfo(np.int64).max) + + +# ---------------------------------------------------------------------- +# An internally used timedelta-NaT largely to use in place of +# np.timedelta64('NaT') for binary operations. + +cdef inline bint is_tdlike_scalar(object obj): + return (is_timedelta64_object(obj) or + PyDelta_Check(obj) or + isinstance(obj, _Tick)) + + +cdef class _TDNaTType(timedelta): + """ + NaTD (Not-a-TimeDelta) is a pandas-internal analogue of NaT that behaves + explicitly like a timedelta, never like a datetime. 
+ """ + cdef readonly: + int64_t value + + def __cinit__(self): + # TODO: is there such a thing as a super __cinit__? + self.value = NPY_NAT + + def __repr__(self): + return 'NaTD' + + def __str__(self): + return 'NaTD' + + @property + def asm8(self): + return np.timedelta64('NaT', 'ns') + + def __hash__(_TDNaTType self): + # py3k needs this defined here + return hash(self.value) + + def __richcmp__(_TDNaTType self, object other, int op): + cdef: + int ndim = getattr(other, 'ndim', -1) + + if ndim == -1: + return op == Py_NE + + if ndim == 0: + if is_tdlike_scalar(other): + return op == Py_NE + else: # FIXME: shouldnt we be raising only for inequalities? + raise TypeError('Cannot compare type %r with type %r' % + (type(self).__name__, type(other).__name__)) + # Note: instead of passing "other, self, _reverse_ops[op]", we observe + # that `_nat_scalar_rules` is invariant under `_reverse_ops`, + # rendering it unnecessary. + return PyObject_RichCompare(other, self, op) + + def __add__(self, other): + if other is NaT: + return NaT + if is_tdlike_scalar(other): + return NaTD + + if hasattr(other, '_typ'): + # Series, DataFrame, ... + return NotImplemented + + if util.is_array(other): + if other.dtype.kind in ['m', 'M']: + result = np.empty(other.shape, dtype='i8') + result.fill(NPY_NAT) + return result.view(other.dtype) + elif other.dtype.kind == 'O': + return np.array([self + x for x in other]) + + raise TypeError("Cannot add dtype {dtype} to Timedelta" + .format(dtype=other.dtype)) + + elif hasattr(other, "dtype"): + return NotImplemented + + # all thats left is invalid scalars + raise TypeError("Cannot add {typ} to Timedelta" + .format(typ=type(other).__name__)) + + def __sub__(self, other): + if is_tdlike_scalar(other): + return NaTD + + if hasattr(other, '_typ'): + # Series, DataFrame, ... 
+ return NotImplemented + + if util.is_array(other): + if other.dtype.kind == 'm': + result = np.empty(other.shape, dtype='i8') + result.fill(NPY_NAT) + return result.view(other.dtype) + elif other.dtype.kind == 'O': + # TODO: does this get shape right? + return np.array([self - x for x in other]) + + raise TypeError("Cannot subtract dtype {dtype} from Timedelta" + .format(dtype=other.dtype)) + + elif hasattr(other, "dtype"): + return NotImplemented + + # all thats left is invalid scalars + raise TypeError("Cannot subtract {typ} from Timedelta" + .format(typ=type(other).__name__)) + + def __mul__(self, other): + if is_integer_object(other) or is_float_object(other): + return NaTD + + if hasattr(other, '_typ'): + # Series, DataFrame, ... + return NotImplemented + + if util.is_array(other): + if other.dtype.kind in ['i', 'u', 'f']: + result = np.empty(other.shape, dtype='i8') + result.fill(NPY_NAT) + return result.view("timedelta64[ns]") + elif other.dtype.kind == 'O': + # TODO: does this get shape right? + return np.array([self * x for x in other]) + + raise TypeError("Cannot multiply Timedelta by dtype {dtype}" + .format(dtype=other.dtype)) + + elif hasattr(other, "dtype"): + return NotImplemented + + # all thats left is invalid scalars + raise TypeError("Cannot multiply Timedelta by {typ}" + .format(typ=type(other).__name__)) + + def __truediv__(self, other): + if is_tdlike_scalar(other): + return np.nan + + if is_integer_object(other) or is_float_object(other): + return NaTD + + if hasattr(other, '_typ'): + # Series, DataFrame, ... + return NotImplemented + + if util.is_array(other): + if other.dtype.kind in ['i', 'u', 'f']: + result = np.empty(other.shape, dtype='i8') + result.fill(NPY_NAT) + return result.view("timedelta64[ns]") + elif other.dtype.kind == 'm': + result = np.empty(other.shape, dtype=np.float64) + result.fill(np.nan) + return result + elif other.dtype.kind == 'O': + # TODO: does this get shape right? 
+ return np.array([self / x for x in other]) + + raise TypeError("Cannot divide Timedelta by dtype {dtype}" + .format(dtype=other.dtype)) + + elif hasattr(other, "dtype"): + return NotImplemented + + # all thats left is invalid scalars + raise TypeError("Cannot divide Timedelta by {typ}" + .format(typ=type(other).__name__)) + + def __floordiv__(self, other): + return self.__truediv__(other) # TODO: is this right? + + def __mod__(self, other): + # Naive implementation, room for optimization + return self.__divmod__(other)[1] + + def __divmod__(self, other): + # Naive implementation, room for optimization + div = self // other + return div, self - div * other + + if not PY3: + def __div__(self, other): + return self.__truediv__(other) + + +class TDNaTType(_TDNaTType): + __array_priority__ = 100 + + def __radd__(self, other): + return self.__add__(other) + + def __rsub__(self, other): + if is_tdlike_scalar(other): + return NaTD + + if is_datetime64_object(other) or PyDateTime_Check(other): + return NaT + + if hasattr(other, '_typ'): + # Series, DataFrame, ... + return NotImplemented + + if util.is_array(other): + if other.dtype.kind == 'M': + result = np.empty(other.shape, dtype='i8') + result.fill(NPY_NAT) + return result.view(other.dtype) + if other.dtype.kind == 'm': + result = np.empty(other.shape, dtype='i8') + result.fill(NPY_NAT) + return result.view(other.dtype) + elif other.dtype.kind == 'O': + return np.array([x - self for x in other]) + + raise TypeError("Cannot subtract Timedelta from dtype {dtype}" + .format(dtype=other.dtype)) + + elif hasattr(other, "dtype"): + return NotImplemented + + # all thats left is invalid scalars + raise TypeError("Cannot subtract Timedelta from {typ}" + .format(typ=type(other).__name__)) + + def __rmul__(self, other): + return self.__mul__(other) + + def __rtruediv__(self, other): + if is_tdlike_scalar(other): + return np.nan + + if hasattr(other, '_typ'): + # Series, DataFrame, ... 
+ return NotImplemented + + if util.is_array(other): + if other.dtype.kind == 'm': + result = np.empty(other.shape, dtype=np.float64) + result.fill(np.nan) + return result + elif other.dtype.kind == 'O': + return np.array([x / self for x in other]) + + raise TypeError("Cannot divide dtype {dtype} by Timedelta" + .format(dtype=other.dtype)) + + elif hasattr(other, "dtype"): + return NotImplemented + + # all thats left is invalid scalars + raise TypeError("Cannot divide {typ} by Timedelta" + .format(typ=type(other).__name__)) + + def __rfloordiv__(self, other): + return self.__rtruediv__(other) # TODO: is this right? + + def __rmod__(self, other): + # Naive implementation, room for optimization + return self.__rdivmod__(other)[1] + + def __rdivmod__(self, other): + # Naive implementation, room for optimization + div = other // self + return div, other - div * self + + if not PY3: + def __rdiv__(self, other): + return self.__rtruediv__(other) + + +NaTD = TDNaTType() diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 1ec37c9f228a6..fe35fb27e199a 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -10,7 +10,7 @@ from pandas._libs.tslibs import NaT, Timedelta, Timestamp, iNaT from pandas._libs.tslibs.fields import get_timedelta_field from pandas._libs.tslibs.timedeltas import ( - array_to_timedelta64, parse_timedelta_unit) + NaTD, array_to_timedelta64, parse_timedelta_unit) import pandas.compat as compat from pandas.util._decorators import Appender @@ -364,7 +364,7 @@ def _add_datetimelike_scalar(self, other): assert other is not NaT other = Timestamp(other) - if other is NaT: + if other is NaT: # TODO: use NaTD # In this case we specifically interpret NaT as a datetime, not # the timedelta interpretation we would get by returning self + NaT result = self.asi8.view('m8[ms]') + NaT.to_datetime64() @@ -435,7 +435,7 @@ def __truediv__(self, other): if isinstance(other, (timedelta, np.timedelta64, 
Tick)): other = Timedelta(other) - if other is NaT: + if other is NaT: # TODO: use NaTD # specifically timedelta64-NaT result = np.empty(self.shape, dtype=np.float64) result.fill(np.nan) @@ -485,7 +485,7 @@ def __rtruediv__(self, other): if isinstance(other, (timedelta, np.timedelta64, Tick)): other = Timedelta(other) - if other is NaT: + if other is NaT: # TODO: use NaTD # specifically timedelta64-NaT result = np.empty(self.shape, dtype=np.float64) result.fill(np.nan) @@ -534,7 +534,7 @@ def __floordiv__(self, other): if is_scalar(other): if isinstance(other, (timedelta, np.timedelta64, Tick)): other = Timedelta(other) - if other is NaT: + if other is NaT: # TODO: use NaTD # treat this specifically as timedelta-NaT result = np.empty(self.shape, dtype=np.float64) result.fill(np.nan) @@ -598,10 +598,7 @@ def __rfloordiv__(self, other): if isinstance(other, (timedelta, np.timedelta64, Tick)): other = Timedelta(other) if other is NaT: - # treat this specifically as timedelta-NaT - result = np.empty(self.shape, dtype=np.float64) - result.fill(np.nan) - return result + other = NaTD # dispatch to Timedelta implementation result = other.__floordiv__(self._data) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index c853a30c0de79..e0f015785c864 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6,7 +6,7 @@ import numpy as np from pandas._libs import ( - Timedelta, algos as libalgos, index as libindex, join as libjoin, lib, + NaT, Timedelta, algos as libalgos, index as libindex, join as libjoin, lib, tslibs) from pandas._libs.lib import is_datetime_array import pandas.compat as compat @@ -4888,6 +4888,10 @@ def _evaluate_with_timedelta_like(self, other, op): other=type(other).__name__)) other = Timedelta(other) + if other is NaT: + from pandas._libs.tslibs.timedeltas import NaTD + other = NaTD + values = self.values with np.errstate(all='ignore'): @@ -5027,8 +5031,8 @@ def _add_numeric_methods_binary(cls): cls.__div__ = 
_make_arithmetic_op(operator.div, cls) cls.__rdiv__ = _make_arithmetic_op(ops.rdiv, cls) - # TODO: rmod? rdivmod? cls.__mod__ = _make_arithmetic_op(operator.mod, cls) + cls.__rmod__ = _make_arithmetic_op(ops.rmod, cls) cls.__floordiv__ = _make_arithmetic_op(operator.floordiv, cls) cls.__rfloordiv__ = _make_arithmetic_op(ops.rfloordiv, cls) cls.__divmod__ = _make_arithmetic_op(divmod, cls) diff --git a/pandas/core/ops.py b/pandas/core/ops.py index e11f0ee01e57c..64d376646e008 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -127,7 +127,7 @@ def maybe_upcast_for_op(obj): # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D') # The isna check is to avoid casting timedelta64("NaT"), which would # return NaT and incorrectly be treated as a datetime-NaT. - return pd.Timedelta(obj) + return pd.Timedelta(obj) # TODO: use NaTD for isna case elif isinstance(obj, np.ndarray) and is_timedelta64_dtype(obj): # GH#22390 Unfortunately we need to special-case right-hand # timedelta64 dtypes because numpy casts integer dtypes to diff --git a/pandas/tests/arithmetic/test_object.py b/pandas/tests/arithmetic/test_object.py index 9917c45ef6d12..c048f2c88069f 100644 --- a/pandas/tests/arithmetic/test_object.py +++ b/pandas/tests/arithmetic/test_object.py @@ -70,6 +70,82 @@ def test_more_na_comparisons(self, dtype): # ------------------------------------------------------------------ # Arithmetic +class TestTimedeltaNaTArithmetic(object): + # Tests for arithmetic with np.timedelta64('NaT') which has some tough + # corner cases + + def test_tdarr_rfloordiv_nat(self): + # TODO: test belongs elsewhere, mostly just putting this here because + # it is the only TDA method patched for the proof of concept + td = np.timedelta64('NaT') + + arr = np.arange(3) * 10**9 + tda = pd.TimedeltaIndex(arr)._data + + result = td // tda + + expected = np.array([np.nan, np.nan, np.nan]) + tm.assert_numpy_array_equal(result, expected) + + def test_numeric_with_timedelta_nat(self, box): + arr = 
np.array([1, 2, 3, 4], dtype=np.int64) + obj = tm.box_expected(arr, box) + + td = np.timedelta64('NaT') + + expected = np.array([td] * 4) + expected = tm.box_expected(expected, box) + + # TODO: RangeIndex + for dtype in [np.int64, np.float64, np.uint64, object]: + dobj = obj.astype(dtype) + for op in [operator.add, operator.sub, ops.radd, ops.rsub]: + with pytest.raises(TypeError): + op(td, dobj) + + if type(dobj) is pd.Index: + # FIXME: implement these on pd.Index + continue + + result = dobj * td + tm.assert_equal(result, expected) + result = td * dobj + tm.assert_equal(result, expected) + result = td / dobj + tm.assert_equal(result, expected) + result = td // dobj + tm.assert_equal(result, expected) + result = td % dobj + tm.assert_equal(result, expected) + + # ops that are invalid with tdnat on the right + with pytest.raises(TypeError): + dobj / td + with pytest.raises(TypeError): + dobj // td + with pytest.raises(TypeError): + dobj % td + with pytest.raises(TypeError): + divmod(dobj, td) + + @pytest.mark.xfail(reason="I haven't fixed it yet...") + def test_object_with_timedelta_nat(self, box): + td = np.timedelta64('NaT') + + arr = np.array([ + pd.offsets.Minute(2), + pd.Timedelta(hours=2), + pd.Timedelta(seconds=1).to_pytimedelta(), + pd.Timedelta(days=3).to_timedelta64()]) + obj = tm.box_expected(arr, box) + + # FIXME: obj + td raises incorrectly + result = obj + td + + expected = tm.box_expected(np.array([td, td, td, td])) + tm.assert_equal(result, expected) + + class TestArithmetic(object): # TODO: parametrize
There are a bunch of places where we do something like: ``` def method(self, other): [...] if isinstance(other, (np.timedelta64, timedelta, Tick)): other = Timedelta(other) [...] ``` But in the case where we start with `np.timedelta64('NaT')`, we end up with `NaT` which is datetime-like instead of timedelta-like. In some of the places where this occurs, we check for this case and special-case it. In others we miss it completely. I am _not_ proposing to change the behavior of `Timedelta` or make `NaTD` public. The idea is that since we need the arithmetic/comparison methods anyway, we might as well put them into one place and handle them systematically. Tests are a mess at the moment.
https://api.github.com/repos/pandas-dev/pandas/pulls/24645
2019-01-05T17:49:23Z
2019-03-20T02:09:56Z
null
2021-11-20T23:23:23Z
CLN: remove redundant mac wheel-build code
diff --git a/setup.py b/setup.py index 6cd359b281b56..7ba4f5ba399d0 100755 --- a/setup.py +++ b/setup.py @@ -401,20 +401,6 @@ def run(self): cmdclass.update({'clean': CleanCommand, 'build': build}) -try: - from wheel.bdist_wheel import bdist_wheel - - class BdistWheel(bdist_wheel): - def get_tag(self): - tag = bdist_wheel.get_tag(self) - repl = 'macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64' - if tag[2] == 'macosx_10_6_intel': - tag = (tag[0], tag[1], repl) - return tag - cmdclass['bdist_wheel'] = BdistWheel -except ImportError: - pass - if cython: suffix = '.pyx' cmdclass['build_ext'] = CheckingBuildExt
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry this code is redundant for wheels built using https://github.com/MacPython/pandas-wheels, as the line https://github.com/matthew-brett/multibuild/blob/266d88fe1e474748cc8a3823dc3934bf55e76383/osx_utils.sh#L306 adds tags for 10.9 and 10.10. Note: circleci fails as I forgot to turn it off on my fork, after it was turned off upstream
https://api.github.com/repos/pandas-dev/pandas/pulls/24644
2019-01-05T16:43:45Z
2019-01-05T18:32:17Z
2019-01-05T18:32:17Z
2019-01-05T22:21:07Z
Make DTA/TDA/PA return NotImplemented on comparisons
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index e6fbc6d1f4b15..b858dc0a8d54a 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -19,7 +19,8 @@ is_extension_type, is_float_dtype, is_int64_dtype, is_object_dtype, is_period_dtype, is_string_dtype, is_timedelta64_dtype, pandas_dtype) from pandas.core.dtypes.dtypes import DatetimeTZDtype -from pandas.core.dtypes.generic import ABCIndexClass, ABCPandasArray, ABCSeries +from pandas.core.dtypes.generic import ( + ABCDataFrame, ABCIndexClass, ABCPandasArray, ABCSeries) from pandas.core.dtypes.missing import isna from pandas.core import ops @@ -96,9 +97,8 @@ def _dt_array_cmp(cls, op): nat_result = True if opname == '__ne__' else False def wrapper(self, other): - # TODO: return NotImplemented for Series / Index and let pandas unbox - # Right now, returning NotImplemented for Index fails because we - # go into the index implementation, which may be a bug? + if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): + return NotImplemented other = lib.item_from_zerodim(other) diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 34bb03b249c21..513bd7223e880 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -17,7 +17,8 @@ _TD_DTYPE, ensure_object, is_datetime64_dtype, is_float_dtype, is_list_like, is_period_dtype, pandas_dtype) from pandas.core.dtypes.dtypes import PeriodDtype -from pandas.core.dtypes.generic import ABCIndexClass, ABCPeriodIndex, ABCSeries +from pandas.core.dtypes.generic import ( + ABCDataFrame, ABCIndexClass, ABCPeriodIndex, ABCSeries) from pandas.core.dtypes.missing import isna, notna import pandas.core.algorithms as algos @@ -48,17 +49,13 @@ def _period_array_cmp(cls, op): def wrapper(self, other): op = getattr(self.asi8, opname) - # We want to eventually defer to the Series or PeriodIndex (which will - # return here with an unboxed PeriodArray). 
But before we do that, - # we do a bit of validation on type (Period) and freq, so that our - # error messages are sensible + + if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): + return NotImplemented + if is_list_like(other) and len(other) != len(self): raise ValueError("Lengths must match") - not_implemented = isinstance(other, (ABCSeries, ABCIndexClass)) - if not_implemented: - other = other._values - if isinstance(other, Period): self._check_compatible_with(other) @@ -66,8 +63,6 @@ def wrapper(self, other): elif isinstance(other, cls): self._check_compatible_with(other) - if not_implemented: - return NotImplemented result = op(other.asi8) mask = self._isnan | other._isnan diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index ab9986b5bff69..624305ec4303d 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -64,6 +64,9 @@ def _td_array_cmp(cls, op): nat_result = True if opname == '__ne__' else False def wrapper(self, other): + if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): + return NotImplemented + if _is_convertible_to_td(other) or other is NaT: try: other = Timedelta(other) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 5a8809f754385..aa7332472fc07 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -109,6 +109,11 @@ def _create_comparison_method(cls, op): Create a comparison method that dispatches to ``cls.values``. """ def wrapper(self, other): + if isinstance(other, ABCSeries): + # the arrays defer to Series for comparison ops but the indexes + # don't, so we have to unwrap here. 
+ other = other._values + result = op(self._data, maybe_unwrap_index(other)) return result diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index cdacd4b42d683..92f209b94f00d 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -152,7 +152,10 @@ def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array): # TODO: Could parametrize over boxes for idx? idx = PeriodIndex(['2011', '2012', '2013', '2014'], freq='A') - with pytest.raises(IncompatibleFrequency, match=msg): + rev_msg = (r'Input has different freq=(M|2M|3M) from ' + r'PeriodArray\(freq=A-DEC\)') + idx_msg = rev_msg if box_with_array is tm.to_array else msg + with pytest.raises(IncompatibleFrequency, match=idx_msg): base <= idx # Different frequency @@ -164,7 +167,10 @@ def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array): Period('2011', freq='4M') >= base idx = PeriodIndex(['2011', '2012', '2013', '2014'], freq='4M') - with pytest.raises(IncompatibleFrequency, match=msg): + rev_msg = (r'Input has different freq=(M|2M|3M) from ' + r'PeriodArray\(freq=4M\)') + idx_msg = rev_msg if box_with_array is tm.to_array else msg + with pytest.raises(IncompatibleFrequency, match=idx_msg): base <= idx @pytest.mark.parametrize('freq', ['M', '2M', '3M'])
Before implementing a boilerplate decorator like in #24282, going through to standardize the affected behaviors.
https://api.github.com/repos/pandas-dev/pandas/pulls/24643
2019-01-05T16:08:27Z
2019-01-05T21:54:32Z
2019-01-05T21:54:32Z
2019-01-05T21:59:16Z
DOC: fix warnings in docstrings examples for deprecated functions
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index e6fbc6d1f4b15..3d5312ff1ed49 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -766,8 +766,8 @@ def tz_convert(self, tz): With the `tz` parameter, we can change the DatetimeIndex to other time zones: - >>> dti = pd.DatetimeIndex(start='2014-08-01 09:00', - ... freq='H', periods=3, tz='Europe/Berlin') + >>> dti = pd.date_range(start='2014-08-01 09:00', + ... freq='H', periods=3, tz='Europe/Berlin') >>> dti DatetimeIndex(['2014-08-01 09:00:00+02:00', @@ -784,8 +784,8 @@ def tz_convert(self, tz): With the ``tz=None``, we can remove the timezone (after converting to UTC if necessary): - >>> dti = pd.DatetimeIndex(start='2014-08-01 09:00',freq='H', - ... periods=3, tz='Europe/Berlin') + >>> dti = pd.date_range(start='2014-08-01 09:00',freq='H', + ... periods=3, tz='Europe/Berlin') >>> dti DatetimeIndex(['2014-08-01 09:00:00+02:00', @@ -1037,8 +1037,8 @@ def normalize(self): Examples -------- - >>> idx = pd.DatetimeIndex(start='2014-08-01 10:00', freq='H', - ... periods=3, tz='Asia/Calcutta') + >>> idx = pd.date_range(start='2014-08-01 10:00', freq='H', + ... 
periods=3, tz='Asia/Calcutta') >>> idx DatetimeIndex(['2014-08-01 10:00:00+05:30', '2014-08-01 11:00:00+05:30', @@ -1164,7 +1164,7 @@ def month_name(self, locale=None): Examples -------- - >>> idx = pd.DatetimeIndex(start='2018-01', freq='M', periods=3) + >>> idx = pd.date_range(start='2018-01', freq='M', periods=3) >>> idx DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'], dtype='datetime64[ns]', freq='M') @@ -1200,7 +1200,7 @@ def day_name(self, locale=None): Examples -------- - >>> idx = pd.DatetimeIndex(start='2018-01-01', freq='D', periods=3) + >>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3) >>> idx DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'], dtype='datetime64[ns]', freq='D') diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d78a19dea9490..d271081aeaa51 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4433,8 +4433,8 @@ def _reindex_multi(self, axes, copy, fill_value): num_legs num_wings dog 4 0 hawk 2 2 - >>> df.reindex_axis(['num_wings', 'num_legs', 'num_heads'], - ... axis='columns') + >>> df.reindex(['num_wings', 'num_legs', 'num_heads'], + ... axis='columns') num_wings num_legs num_heads dog 0 4 NaN hawk 2 2 NaN @@ -7352,7 +7352,7 @@ def clip_upper(self, threshold, axis=None, inplace=False): 4 5 dtype: int64 - >>> s.clip_upper(3) + >>> s.clip(upper=3) 0 1 1 2 2 3 @@ -7360,11 +7360,11 @@ def clip_upper(self, threshold, axis=None, inplace=False): 4 3 dtype: int64 - >>> t = [5, 4, 3, 2, 1] - >>> t + >>> elemwise_thresholds = [5, 4, 3, 2, 1] + >>> elemwise_thresholds [5, 4, 3, 2, 1] - >>> s.clip_upper(t) + >>> s.clip(upper=elemwise_thresholds) 0 1 1 2 2 3 @@ -7428,7 +7428,7 @@ def clip_lower(self, threshold, axis=None, inplace=False): Series single threshold clipping: >>> s = pd.Series([5, 6, 7, 8, 9]) - >>> s.clip_lower(8) + >>> s.clip(lower=8) 0 8 1 8 2 8 @@ -7440,7 +7440,7 @@ def clip_lower(self, threshold, axis=None, inplace=False): should be the same length as the Series. 
>>> elemwise_thresholds = [4, 8, 7, 2, 5] - >>> s.clip_lower(elemwise_thresholds) + >>> s.clip(lower=elemwise_thresholds) 0 5 1 8 2 7 @@ -7457,7 +7457,7 @@ def clip_lower(self, threshold, axis=None, inplace=False): 1 3 4 2 5 6 - >>> df.clip_lower(3) + >>> df.clip(lower=3) A B 0 3 3 1 3 4 @@ -7466,7 +7466,7 @@ def clip_lower(self, threshold, axis=None, inplace=False): Or to an array of values. By default, `threshold` should be the same shape as the DataFrame. - >>> df.clip_lower(np.array([[3, 4], [2, 2], [6, 2]])) + >>> df.clip(lower=np.array([[3, 4], [2, 2], [6, 2]])) A B 0 3 4 1 3 4 @@ -7476,13 +7476,13 @@ def clip_lower(self, threshold, axis=None, inplace=False): `threshold` should be the same length as the axis specified by `axis`. - >>> df.clip_lower([3, 3, 5], axis='index') + >>> df.clip(lower=[3, 3, 5], axis='index') A B 0 3 3 1 3 4 2 5 6 - >>> df.clip_lower([4, 5], axis='columns') + >>> df.clip(lower=[4, 5], axis='columns') A B 0 4 5 1 4 5
- [x] closes #24525 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry I have updated all the `FutureWarning`, let me know if `PerformanceWarning` and `DtypeWarning` shown below also needs a fix. ```zsh python scripts/validate_docstrings.py --errors GL06,GL07,GL09,SS04,PR03,PR05,EX04 scripts/validate_docstrings.py:764: PerformanceWarning: indexing past lexsort depth may impact performance. errs, wrns, examples_errs = get_validation_data(doc) scripts/validate_docstrings.py:764: PerformanceWarning: indexing past lexsort depth may impact performance. errs, wrns, examples_errs = get_validation_data(doc) scripts/validate_docstrings.py:524: DtypeWarning: Columns (0) have mixed types. Specify dtype option on import or set low_memory=False. runner.run(test, out=f.write) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/24642
2019-01-05T14:42:34Z
2019-01-05T17:31:42Z
2019-01-05T17:31:42Z
2019-01-05T23:58:06Z
Fix 32-bit builds by correctly using intp_t instead of int64_t for nmpy.searchsorted result, part 2 (#24621)
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 7c9c2cafd1afb..6aa02ca1e5421 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -1290,7 +1290,8 @@ def is_date_array_normalized(int64_t[:] stamps, object tz=None): cdef: Py_ssize_t i, n = len(stamps) ndarray[int64_t] trans - int64_t[:] deltas, pos + int64_t[:] deltas + intp_t[:] pos npy_datetimestruct dts int64_t local_val, delta str typ
xref #24613
https://api.github.com/repos/pandas-dev/pandas/pulls/24640
2019-01-05T13:55:17Z
2019-01-05T14:48:33Z
2019-01-05T14:48:33Z
2019-01-05T14:48:33Z
CLN: Parameterize test cases
diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py index 01aa8e8ccc1ee..7dfc21562cc5d 100644 --- a/pandas/tests/plotting/test_converter.py +++ b/pandas/tests/plotting/test_converter.py @@ -239,29 +239,17 @@ def test_conversion_outofbounds_datetime(self): xp = converter.dates.date2num(values[0]) assert rs == xp - def test_time_formatter(self): + @pytest.mark.parametrize('time,format_expected', [ + (0, '00:00'), # time2num(datetime.time.min) + (86399.999999, '23:59:59.999999'), # time2num(datetime.time.max) + (90000, '01:00'), + (3723, '01:02:03'), + (39723.2, '11:02:03.200') + ]) + def test_time_formatter(self, time, format_expected): # issue 18478 - - # time2num(datetime.time.min) - rs = self.tc(0) - xp = '00:00' - assert rs == xp - - # time2num(datetime.time.max) - rs = self.tc(86399.999999) - xp = '23:59:59.999999' - assert rs == xp - - # some other times - rs = self.tc(90000) - xp = '01:00' - assert rs == xp - rs = self.tc(3723) - xp = '01:02:03' - assert rs == xp - rs = self.tc(39723.2) - xp = '11:02:03.200' - assert rs == xp + result = self.tc(time) + assert result == format_expected def test_dateindex_conversion(self): decimals = 9
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/24639
2019-01-05T13:43:40Z
2019-01-05T14:56:03Z
2019-01-05T14:56:03Z
2019-01-05T16:09:18Z
DOC: parallel safe
diff --git a/doc/sphinxext/contributors.py b/doc/sphinxext/contributors.py index 8c9fa5bc961d1..179ba19a0908a 100644 --- a/doc/sphinxext/contributors.py +++ b/doc/sphinxext/contributors.py @@ -46,4 +46,8 @@ def run(self): def setup(app): app.add_directive('contributors', ContributorsDirective) - return {'version': '0.1'} + return { + 'version': '0.1', + 'parallel_read_safe': True, + 'parallel_write_safe': True, + }
@datapythonista this may have been why the parallel option wasn't helping you.
https://api.github.com/repos/pandas-dev/pandas/pulls/24638
2019-01-05T12:54:42Z
2019-01-05T14:55:34Z
2019-01-05T14:55:34Z
2019-01-06T00:07:01Z
REF: io/formats/html.py (and io/formats/format.py)
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 9dc2692f276e3..f8ee9c273fd59 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -778,7 +778,7 @@ def space_format(x, y): for i, (col, x) in enumerate(zip(columns, fmt_columns))] - if self.show_index_names and self.has_index_names: + if self.show_row_idx_names: for x in str_columns: x.append('') @@ -793,22 +793,33 @@ def has_index_names(self): def has_column_names(self): return _has_names(self.frame.columns) + @property + def show_row_idx_names(self): + return all((self.has_index_names, + self.index, + self.show_index_names)) + + @property + def show_col_idx_names(self): + return all((self.has_column_names, + self.show_index_names, + self.header)) + def _get_formatted_index(self, frame): # Note: this is only used by to_string() and to_latex(), not by # to_html(). index = frame.index columns = frame.columns - - show_index_names = self.show_index_names and self.has_index_names - show_col_names = (self.show_index_names and self.has_column_names) - fmt = self._get_formatter('__index__') if isinstance(index, ABCMultiIndex): - fmt_index = index.format(sparsify=self.sparsify, adjoin=False, - names=show_index_names, formatter=fmt) + fmt_index = index.format( + sparsify=self.sparsify, adjoin=False, + names=self.show_row_idx_names, formatter=fmt) else: - fmt_index = [index.format(name=show_index_names, formatter=fmt)] + fmt_index = [index.format( + name=self.show_row_idx_names, formatter=fmt)] + fmt_index = [tuple(_make_fixed_width(list(x), justify='left', minimum=(self.col_space or 0), adj=self.adj)) for x in fmt_index] @@ -816,7 +827,7 @@ def _get_formatted_index(self, frame): adjoined = self.adj.adjoin(1, *fmt_index).split('\n') # empty space for columns - if show_col_names: + if self.show_col_idx_names: col_header = ['{x}'.format(x=x) for x in self._get_column_name_list()] else: diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index 
390c3f3d5c709..90f1dbe704806 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -45,23 +45,11 @@ def __init__(self, formatter, classes=None, notebook=False, border=None, @property def show_row_idx_names(self): - return all((self.fmt.has_index_names, - self.fmt.index, - self.fmt.show_index_names)) + return self.fmt.show_row_idx_names @property def show_col_idx_names(self): - # see gh-22579 - # Column misalignment also occurs for - # a standard index when the columns index is named. - # Determine if ANY column names need to be displayed - # since if the row index is not displayed a column of - # blank cells need to be included before the DataFrame values. - # TODO: refactor to add show_col_idx_names property to - # DataFrameFormatter - return all((self.fmt.has_column_names, - self.fmt.show_index_names, - self.fmt.header)) + return self.fmt.show_col_idx_names @property def row_levels(self): @@ -184,14 +172,28 @@ def write_style(self): template = dedent('\n'.join((template_first, template_mid, template_last))) - if self.notebook: - self.write(template) + self.write(template) def write_result(self, buf): - indent = 0 - id_section = "" - frame = self.frame + if self.notebook: + self.write('<div>') + self.write_style() + + self._write_table() + + if self.should_show_dimensions: + by = chr(215) if compat.PY3 else unichr(215) # × + self.write(u('<p>{rows} rows {by} {cols} columns</p>') + .format(rows=len(self.frame), + by=by, + cols=len(self.frame.columns))) + if self.notebook: + self.write('</div>') + + buffer_put_lines(buf, self.elements) + + def _write_table(self, indent=0): _classes = ['dataframe'] # Default class. 
use_mathjax = get_option("display.html.use_mathjax") if not use_mathjax: @@ -204,33 +206,21 @@ def write_result(self, buf): .format(typ=type(self.classes))) _classes.extend(self.classes) - if self.notebook: - self.write('<div>') - - self.write_style() - - if self.table_id is not None: + if self.table_id is None: + id_section = "" + else: id_section = ' id="{table_id}"'.format(table_id=self.table_id) + self.write('<table border="{border}" class="{cls}"{id_section}>' .format(border=self.border, cls=' '.join(_classes), id_section=id_section), indent) - indent += self.indent_delta - indent = self._write_header(indent) - indent = self._write_body(indent) + if self.fmt.header or self.show_row_idx_names: + self._write_header(indent + self.indent_delta) - self.write('</table>', indent) - if self.should_show_dimensions: - by = chr(215) if compat.PY3 else unichr(215) # × - self.write(u('<p>{rows} rows {by} {cols} columns</p>') - .format(rows=len(frame), - by=by, - cols=len(frame.columns))) + self._write_body(indent + self.indent_delta) - if self.notebook: - self.write('</div>') - - buffer_put_lines(buf, self.elements) + self.write('</table>', indent) def _write_col_header(self, indent): truncate_h = self.fmt.truncate_h @@ -359,41 +349,29 @@ def _write_row_header(self, indent): self.write_tr(row, indent, self.indent_delta, header=True) def _write_header(self, indent): - if not (self.fmt.header or self.show_row_idx_names): - # write nothing - return indent - self.write('<thead>', indent) - indent += self.indent_delta if self.fmt.header: - self._write_col_header(indent) + self._write_col_header(indent + self.indent_delta) if self.show_row_idx_names: - self._write_row_header(indent) + self._write_row_header(indent + self.indent_delta) - indent -= self.indent_delta self.write('</thead>', indent) - return indent - def _write_body(self, indent): self.write('<tbody>', indent) - indent += self.indent_delta - fmt_values = {i: self.fmt._format_col(i) for i in range(self.ncols)} # write 
values if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex): - self._write_hierarchical_rows(fmt_values, indent) + self._write_hierarchical_rows( + fmt_values, indent + self.indent_delta) else: - self._write_regular_rows(fmt_values, indent) + self._write_regular_rows( + fmt_values, indent + self.indent_delta) - indent -= self.indent_delta self.write('</tbody>', indent) - indent -= self.indent_delta - - return indent def _write_regular_rows(self, fmt_values, indent): truncate_h = self.fmt.truncate_h
- [ n/a] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ n/a] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24637
2019-01-05T12:43:27Z
2019-01-05T14:55:15Z
2019-01-05T14:55:15Z
2019-01-05T21:05:31Z
Fix DeprecationWarning: invalid escape sequence in versioneer.py
diff --git a/versioneer.py b/versioneer.py index 2725fe98641a4..01adaa248dbd4 100644 --- a/versioneer.py +++ b/versioneer.py @@ -464,7 +464,9 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): print("unable to run %s (error)" % dispcmd) return None return stdout -LONG_VERSION_PY['git'] = ''' + + +LONG_VERSION_PY['git'] = r''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build
Hello, This is a little patch to fix a `DeprecationWarning: invalid escape sequence` in `versioneer.py`. - [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24636
2019-01-05T12:19:13Z
2019-01-05T17:52:34Z
2019-01-05T17:52:34Z
2019-01-05T17:58:37Z
DOC: Fix flake8 errors on whatsnew v0.15*
diff --git a/doc/source/whatsnew/v0.15.0.rst b/doc/source/whatsnew/v0.15.0.rst index 6f74f0393d123..36f2c9013219b 100644 --- a/doc/source/whatsnew/v0.15.0.rst +++ b/doc/source/whatsnew/v0.15.0.rst @@ -5,11 +5,6 @@ v0.15.0 (October 18, 2014) {{ header }} -.. ipython:: python - :suppress: - - from pandas import * # noqa F401, F403 - This is a major release from 0.14.1 and includes a small number of API changes, several new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all @@ -77,7 +72,8 @@ For full docs, see the :ref:`categorical introduction <categorical>` and the .. ipython:: python :okwarning: - df = DataFrame({"id":[1,2,3,4,5,6], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']}) + df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6], + "raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']}) df["grade"] = df["raw_grade"].astype("category") df["grade"] @@ -86,7 +82,8 @@ For full docs, see the :ref:`categorical introduction <categorical>` and the df["grade"].cat.categories = ["very good", "good", "very bad"] # Reorder the categories and simultaneously add the missing categories - df["grade"] = df["grade"].cat.set_categories(["very bad", "bad", "medium", "good", "very good"]) + df["grade"] = df["grade"].cat.set_categories(["very bad", "bad", + "medium", "good", "very good"]) df["grade"] df.sort_values("grade") df.groupby("grade").size() @@ -123,7 +120,7 @@ This type is very similar to how ``Timestamp`` works for ``datetimes``. It is a .. code-block:: ipython # Timedelta accessor - In [9]: tds = Timedelta('31 days 5 min 3 sec') + In [9]: tds = pd.Timedelta('31 days 5 min 3 sec') In [10]: tds.minutes Out[10]: 5L @@ -151,22 +148,22 @@ Construct a scalar .. 
ipython:: python - Timedelta('1 days 06:05:01.00003') - Timedelta('15.5us') - Timedelta('1 hour 15.5us') + pd.Timedelta('1 days 06:05:01.00003') + pd.Timedelta('15.5us') + pd.Timedelta('1 hour 15.5us') # negative Timedeltas have this string repr # to be more consistent with datetime.timedelta conventions - Timedelta('-1us') + pd.Timedelta('-1us') # a NaT - Timedelta('nan') + pd.Timedelta('nan') Access fields for a ``Timedelta`` .. ipython:: python - td = Timedelta('1 hour 3m 15.5us') + td = pd.Timedelta('1 hour 3m 15.5us') td.seconds td.microseconds td.nanoseconds @@ -177,26 +174,26 @@ Construct a ``TimedeltaIndex`` :suppress: import datetime - from datetime import timedelta .. ipython:: python - TimedeltaIndex(['1 days','1 days, 00:00:05', - np.timedelta64(2,'D'),timedelta(days=2,seconds=2)]) + pd.TimedeltaIndex(['1 days', '1 days, 00:00:05', + np.timedelta64(2, 'D'), + datetime.timedelta(days=2, seconds=2)]) Constructing a ``TimedeltaIndex`` with a regular range .. ipython:: python - timedelta_range('1 days',periods=5,freq='D') - timedelta_range(start='1 days',end='2 days',freq='30T') + pd.timedelta_range('1 days', periods=5, freq='D') + pd.timedelta_range(start='1 days', end='2 days', freq='30T') You can now use a ``TimedeltaIndex`` as the index of a pandas object .. ipython:: python - s = Series(np.arange(5), - index=timedelta_range('1 days',periods=5,freq='s')) + s = pd.Series(np.arange(5), + index=pd.timedelta_range('1 days', periods=5, freq='s')) s You can select with partial string selections @@ -210,9 +207,9 @@ Finally, the combination of ``TimedeltaIndex`` with ``DatetimeIndex`` allow cert .. 
ipython:: python - tdi = TimedeltaIndex(['1 days',pd.NaT,'2 days']) + tdi = pd.TimedeltaIndex(['1 days', pd.NaT, '2 days']) tdi.tolist() - dti = date_range('20130101',periods=3) + dti = pd.date_range('20130101', periods=3) dti.tolist() (dti + tdi).tolist() @@ -235,9 +232,8 @@ A new display option ``display.memory_usage`` (see :ref:`options`) sets the defa dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]', 'complex128', 'object', 'bool'] n = 5000 - data = dict([ (t, np.random.randint(100, size=n).astype(t)) - for t in dtypes]) - df = DataFrame(data) + data = {t: np.random.randint(100, size=n).astype(t) for t in dtypes} + df = pd.DataFrame(data) df['categorical'] = df['object'].astype('category') df.info() @@ -260,7 +256,7 @@ This will return a Series, indexed like the existing Series. See the :ref:`docs .. ipython:: python # datetime - s = Series(date_range('20130101 09:10:12',periods=4)) + s = pd.Series(pd.date_range('20130101 09:10:12', periods=4)) s s.dt.hour s.dt.second @@ -271,7 +267,7 @@ This enables nice expressions like this: .. ipython:: python - s[s.dt.day==2] + s[s.dt.day == 2] You can easily produce tz aware transformations: @@ -292,7 +288,7 @@ The ``.dt`` accessor works for period and timedelta dtypes. .. ipython:: python # period - s = Series(period_range('20130101',periods=4,freq='D')) + s = pd.Series(pd.period_range('20130101', periods=4, freq='D')) s s.dt.year s.dt.day @@ -300,7 +296,7 @@ The ``.dt`` accessor works for period and timedelta dtypes. .. ipython:: python # timedelta - s = Series(timedelta_range('1 day 00:00:05',periods=4,freq='s')) + s = pd.Series(pd.timedelta_range('1 day 00:00:05', periods=4, freq='s')) s s.dt.days s.dt.seconds @@ -318,11 +314,12 @@ Timezone handling improvements .. 
ipython:: python :okwarning: - ts = Timestamp('2014-08-01 09:00', tz='US/Eastern') + ts = pd.Timestamp('2014-08-01 09:00', tz='US/Eastern') ts ts.tz_localize(None) - didx = DatetimeIndex(start='2014-08-01 09:00', freq='H', periods=10, tz='US/Eastern') + didx = pd.DatetimeIndex(start='2014-08-01 09:00', freq='H', + periods=10, tz='US/Eastern') didx didx.tz_localize(None) @@ -353,11 +350,11 @@ Rolling/Expanding Moments improvements .. ipython:: python - s = Series([10, 11, 12, 13]) + s = pd.Series([10, 11, 12, 13]) .. code-block:: ipython - In [15]: rolling_min(s, window=10, min_periods=5) + In [15]: pd.rolling_min(s, window=10, min_periods=5) ValueError: min_periods (5) must be <= window (4) New behavior @@ -386,7 +383,7 @@ Rolling/Expanding Moments improvements .. code-block:: ipython - In [7]: rolling_sum(Series(range(4)), window=3, min_periods=0, center=True) + In [7]: pd.rolling_sum(Series(range(4)), window=3, min_periods=0, center=True) Out[7]: 0 1 1 3 @@ -398,7 +395,8 @@ Rolling/Expanding Moments improvements .. code-block:: ipython - In [7]: rolling_sum(Series(range(4)), window=3, min_periods=0, center=True) + In [7]: pd.rolling_sum(pd.Series(range(4)), window=3, + ....: min_periods=0, center=True) Out[7]: 0 1 1 3 @@ -412,13 +410,13 @@ Rolling/Expanding Moments improvements .. ipython:: python - s = Series([10.5, 8.8, 11.4, 9.7, 9.3]) + s = pd.Series([10.5, 8.8, 11.4, 9.7, 9.3]) Behavior prior to 0.15.0: .. code-block:: ipython - In [39]: rolling_window(s, window=3, win_type='triang', center=True) + In [39]: pd.rolling_window(s, window=3, win_type='triang', center=True) Out[39]: 0 NaN 1 6.583333 @@ -461,7 +459,7 @@ Rolling/Expanding Moments improvements .. ipython:: python - s = Series([1, None, None, None, 2, 3]) + s = pd.Series([1, None, None, None, 2, 3]) .. code-block:: ipython @@ -503,21 +501,23 @@ Rolling/Expanding Moments improvements .. code-block:: ipython - In [7]: pd.ewma(Series([None, 1., 8.]), com=2.) 
+ In [7]: pd.ewma(pd.Series([None, 1., 8.]), com=2.) Out[7]: 0 NaN 1 1.0 2 5.2 dtype: float64 - In [8]: pd.ewma(Series([1., None, 8.]), com=2., ignore_na=True) # pre-0.15.0 behavior + In [8]: pd.ewma(pd.Series([1., None, 8.]), com=2., + ....: ignore_na=True) # pre-0.15.0 behavior Out[8]: 0 1.0 1 1.0 2 5.2 dtype: float64 - In [9]: pd.ewma(Series([1., None, 8.]), com=2., ignore_na=False) # new default + In [9]: pd.ewma(pd.Series([1., None, 8.]), com=2., + ....: ignore_na=False) # new default Out[9]: 0 1.000000 1 1.000000 @@ -554,7 +554,7 @@ Rolling/Expanding Moments improvements .. ipython:: python - s = Series([1., 2., 0., 4.]) + s = pd.Series([1., 2., 0., 4.]) .. code-block:: ipython @@ -612,8 +612,8 @@ Improvements in the sql io module .. code-block:: python - df.to_sql('table', engine, schema='other_schema') - pd.read_sql_table('table', engine, schema='other_schema') + df.to_sql('table', engine, schema='other_schema') # noqa F821 + pd.read_sql_table('table', engine, schema='other_schema') # noqa F821 - Added support for writing ``NaN`` values with ``to_sql`` (:issue:`2754`). - Added support for writing datetime64 columns with ``to_sql`` for all database flavors (:issue:`7103`). @@ -668,7 +668,7 @@ Other notable API changes: .. ipython:: python - df = DataFrame([['a'],['b']],index=[1,2]) + df = pd.DataFrame([['a'], ['b']], index=[1, 2]) df In prior versions there was a difference in these two constructs: @@ -687,13 +687,13 @@ Other notable API changes: .. code-block:: ipython - In [3]: df.loc[[1,3]] + In [3]: df.loc[[1, 3]] Out[3]: 0 1 a 3 NaN - In [4]: df.loc[[1,3],:] + In [4]: df.loc[[1, 3], :] Out[4]: 0 1 a @@ -703,10 +703,10 @@ Other notable API changes: .. 
ipython:: python - p = Panel(np.arange(2*3*4).reshape(2,3,4), - items=['ItemA','ItemB'], - major_axis=[1,2,3], - minor_axis=['A','B','C','D']) + p = pd.Panel(np.arange(2 * 3 * 4).reshape(2, 3, 4), + items=['ItemA', 'ItemB'], + major_axis=[1, 2, 3], + minor_axis=['A', 'B', 'C', 'D']) p The following would raise ``KeyError`` prior to 0.15.0: @@ -725,15 +725,16 @@ Other notable API changes: .. ipython:: python :okexcept: - s = Series(np.arange(3,dtype='int64'), - index=MultiIndex.from_product([['A'],['foo','bar','baz']], - names=['one','two']) - ).sort_index() + s = pd.Series(np.arange(3, dtype='int64'), + index=pd.MultiIndex.from_product([['A'], + ['foo', 'bar', 'baz']], + names=['one', 'two']) + ).sort_index() s try: - s.loc[['D']] + s.loc[['D']] except KeyError as e: - print("KeyError: " + str(e)) + print("KeyError: " + str(e)) - Assigning values to ``None`` now considers the dtype when choosing an 'empty' value (:issue:`7941`). @@ -743,7 +744,7 @@ Other notable API changes: .. ipython:: python - s = Series([1, 2, 3]) + s = pd.Series([1, 2, 3]) s.loc[0] = None s @@ -754,7 +755,7 @@ Other notable API changes: .. ipython:: python - s = Series(["a", "b", "c"]) + s = pd.Series(["a", "b", "c"]) s.loc[0] = None s @@ -764,7 +765,7 @@ Other notable API changes: .. ipython:: python - s = Series([1, 2, 3]) + s = pd.Series([1, 2, 3]) s2 = s s += 1.5 @@ -816,9 +817,9 @@ Other notable API changes: .. ipython:: python - i = date_range('1/1/2011', periods=3, freq='10s', tz = 'US/Eastern') + i = pd.date_range('1/1/2011', periods=3, freq='10s', tz='US/Eastern') i - df = DataFrame( {'a' : i } ) + df = pd.DataFrame({'a': i}) df df.dtypes @@ -837,7 +838,7 @@ Other notable API changes: .. code-block:: python - In [1]: df = DataFrame(np.arange(0,9), columns=['count']) + In [1]: df = pd.DataFrame(np.arange(0, 9), columns=['count']) In [2]: df['group'] = 'b' @@ -855,8 +856,8 @@ Other notable API changes: .. 
ipython:: python - df = DataFrame([[True, 1],[False, 2]], - columns=["female","fitness"]) + df = pd.DataFrame([[True, 1], [False, 2]], + columns=["female", "fitness"]) df df.dtypes @@ -916,18 +917,18 @@ Deprecations .. code-block:: python # + - Index(['a','b','c']) + Index(['b','c','d']) + pd.Index(['a', 'b', 'c']) + pd.Index(['b', 'c', 'd']) # should be replaced by - Index(['a','b','c']).union(Index(['b','c','d'])) + pd.Index(['a', 'b', 'c']).union(pd.Index(['b', 'c', 'd'])) .. code-block:: python # - - Index(['a','b','c']) - Index(['b','c','d']) + pd.Index(['a', 'b', 'c']) - pd.Index(['b', 'c', 'd']) # should be replaced by - Index(['a','b','c']).difference(Index(['b','c','d'])) + pd.Index(['a', 'b', 'c']).difference(pd.Index(['b', 'c', 'd'])) - The ``infer_types`` argument to :func:`~pandas.read_html` now has no effect and is deprecated (:issue:`7762`, :issue:`7032`). @@ -979,10 +980,10 @@ Other: .. ipython:: python - df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8, - 'catB': ['a', 'b', 'c', 'd'] * 6, - 'numC': np.arange(24), - 'numD': np.arange(24.) + .5}) + df = pd.DataFrame({'catA': ['foo', 'foo', 'bar'] * 8, + 'catB': ['a', 'b', 'c', 'd'] * 6, + 'numC': np.arange(24), + 'numD': np.arange(24.) + .5}) df.describe(include=["object"]) df.describe(include=["number", "object"], exclude=["float"]) @@ -1002,7 +1003,7 @@ Other: .. ipython:: python - df = DataFrame({'A': ['a', 'b', 'a'], 'B': ['c', 'c', 'b'], + df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['c', 'c', 'b'], 'C': [1, 2, 3]}) pd.get_dummies(df) @@ -1015,7 +1016,7 @@ Other: .. ipython:: python business_dates = date_range(start='4/1/2014', end='6/30/2014', freq='B') - df = DataFrame(1, index=business_dates, columns=['a', 'b']) + df = pd.DataFrame(1, index=business_dates, columns=['a', 'b']) # get the first, 4th, and last date index for each month df.groupby([df.index.year, df.index.month]).nth([0, 3, -1]) @@ -1025,14 +1026,14 @@ Other: .. 
ipython:: python - idx = pd.period_range('2014-07-01 09:00', periods=5, freq='H') - idx - idx + pd.offsets.Hour(2) - idx + Timedelta('120m') + idx = pd.period_range('2014-07-01 09:00', periods=5, freq='H') + idx + idx + pd.offsets.Hour(2) + idx + pd.Timedelta('120m') - idx = pd.period_range('2014-07', periods=5, freq='M') - idx - idx + pd.offsets.MonthEnd(3) + idx = pd.period_range('2014-07', periods=5, freq='M') + idx + idx + pd.offsets.MonthEnd(3) - Added experimental compatibility with ``openpyxl`` for versions >= 2.0. The ``DataFrame.to_excel`` method ``engine`` keyword now recognizes ``openpyxl1`` and ``openpyxl2`` @@ -1051,18 +1052,19 @@ Other: .. ipython:: python - idx = MultiIndex.from_product([['a'], range(3), list("pqr")], names=['foo', 'bar', 'baz']) + idx = pd.MultiIndex.from_product([['a'], range(3), list("pqr")], + names=['foo', 'bar', 'baz']) idx.set_names('qux', level=0) - idx.set_names(['qux','corge'], level=[0,1]) - idx.set_levels(['a','b','c'], level='bar') - idx.set_levels([['a','b','c'],[1,2,3]], level=[1,2]) + idx.set_names(['qux', 'corge'], level=[0, 1]) + idx.set_levels(['a', 'b', 'c'], level='bar') + idx.set_levels([['a', 'b', 'c'], [1, 2, 3]], level=[1, 2]) - ``Index.isin`` now supports a ``level`` argument to specify which index level to use for membership tests (:issue:`7892`, :issue:`7890`) .. code-block:: ipython - In [1]: idx = MultiIndex.from_product([[0, 1], ['a', 'b', 'c']]) + In [1]: idx = pd.MultiIndex.from_product([[0, 1], ['a', 'b', 'c']]) In [2]: idx.values Out[2]: array([(0, 'a'), (0, 'b'), (0, 'c'), (1, 'a'), (1, 'b'), (1, 'c')], dtype=object) @@ -1074,7 +1076,7 @@ Other: .. 
ipython:: python - idx = Index([1, 2, 3, 4, 1, 2]) + idx = pd.Index([1, 2, 3, 4, 1, 2]) idx idx.duplicated() idx.drop_duplicates() diff --git a/doc/source/whatsnew/v0.15.1.rst b/doc/source/whatsnew/v0.15.1.rst index be7cf04bcdd68..1091944cb056f 100644 --- a/doc/source/whatsnew/v0.15.1.rst +++ b/doc/source/whatsnew/v0.15.1.rst @@ -5,11 +5,6 @@ v0.15.1 (November 9, 2014) {{ header }} -.. ipython:: python - :suppress: - - from pandas import * # noqa F401, F403 - This is a minor bug-fix release from 0.15.0 and includes a small number of API changes, several new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all @@ -28,7 +23,7 @@ API changes .. ipython:: python - s = Series(date_range('20130101',periods=5,freq='D')) + s = pd.Series(pd.date_range('20130101', periods=5, freq='D')) s.iloc[2] = np.nan s @@ -56,12 +51,12 @@ API changes .. ipython:: python - np.random.seed(2718281) - df = pd.DataFrame(np.random.randint(0, 100, (10, 2)), - columns=['jim', 'joe']) - df.head() + np.random.seed(2718281) + df = pd.DataFrame(np.random.randint(0, 100, (10, 2)), + columns=['jim', 'joe']) + df.head() - ts = pd.Series(5 * np.random.randint(0, 3, 10)) + ts = pd.Series(5 * np.random.randint(0, 3, 10)) previous behavior: @@ -156,9 +151,9 @@ API changes In [17]: from pandas.io.data import Options - In [18]: aapl = Options('aapl','yahoo') + In [18]: aapl = Options('aapl', 'yahoo') - In [19]: aapl.get_call_data().iloc[0:5,0:1] + In [19]: aapl.get_call_data().iloc[0:5, 0:1] Out[19]: Last Strike Expiry Type Symbol @@ -183,7 +178,7 @@ API changes datetime.date(2016, 1, 15), datetime.date(2017, 1, 20)] - In [21]: aapl.get_near_stock_price(expiry=aapl.expiry_dates[0:3]).iloc[0:5,0:1] + In [21]: aapl.get_near_stock_price(expiry=aapl.expiry_dates[0:3]).iloc[0:5, 0:1] Out[21]: Last Strike Expiry Type Symbol @@ -233,7 +228,8 @@ Enhancements .. 
ipython:: python - dfi = DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']) + dfi = pd.DataFrame(1, index=pd.MultiIndex.from_product([['a'], + range(1000)]), columns=['A']) previous behavior: diff --git a/doc/source/whatsnew/v0.15.2.rst b/doc/source/whatsnew/v0.15.2.rst index 437dd3f8d3df6..dabdcd1ab76c3 100644 --- a/doc/source/whatsnew/v0.15.2.rst +++ b/doc/source/whatsnew/v0.15.2.rst @@ -5,11 +5,6 @@ v0.15.2 (December 12, 2014) {{ header }} -.. ipython:: python - :suppress: - - from pandas import * # noqa F401, F403 - This is a minor release from 0.15.1 and includes a large number of bug fixes along with several new features, enhancements, and performance improvements. @@ -79,7 +74,7 @@ API changes .. ipython:: python - data = pd.DataFrame({'x':[1, 2, 3]}) + data = pd.DataFrame({'x': [1, 2, 3]}) data.y = 2 data['y'] = [2, 4, 6] data @@ -154,7 +149,7 @@ Other enhancements: .. code-block:: python from sqlalchemy.types import String - data.to_sql('data_dtype', engine, dtype={'Col_1': String}) + data.to_sql('data_dtype', engine, dtype={'Col_1': String}) # noqa F821 - ``Series.all`` and ``Series.any`` now support the ``level`` and ``skipna`` parameters (:issue:`8302`): diff --git a/setup.cfg b/setup.cfg index 6c076eed580dd..3b7d1da9a2b02 100644 --- a/setup.cfg +++ b/setup.cfg @@ -46,9 +46,6 @@ ignore = E402, # module level import not at top of file E711, # comparison to none should be 'if cond is none:' exclude = - doc/source/whatsnew/v0.15.0.rst - doc/source/whatsnew/v0.15.1.rst - doc/source/whatsnew/v0.15.2.rst doc/source/basics.rst doc/source/contributing_docstring.rst doc/source/enhancingperf.rst
- [ ] closes #24239
https://api.github.com/repos/pandas-dev/pandas/pulls/24635
2019-01-05T11:10:07Z
2019-01-05T14:54:55Z
2019-01-05T14:54:55Z
2019-01-05T15:03:43Z
TST: Fixed timezone issues post DatetimeArray refactor
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 7fa386935e3f4..2c8f3ad07b639 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1553,6 +1553,7 @@ Timezones - Bug in :func:`to_datetime` where ``utc=True`` was not respected when specifying a ``unit`` and ``errors='ignore'`` (:issue:`23758`) - Bug in :func:`to_datetime` where ``utc=True`` was not respected when passing a :class:`Timestamp` (:issue:`24415`) - Bug in :meth:`DataFrame.any` returns wrong value when ``axis=1`` and the data is of datetimelike type (:issue:`23070`) +- Bug in :meth:`DatetimeIndex.to_period` where a timezone aware index was converted to UTC first before creating :class:`PeriodIndex` (:issue:`22905`) Offsets ^^^^^^^ @@ -1802,6 +1803,9 @@ Reshaping - Constructing a DataFrame with an index argument that wasn't already an instance of :class:`~pandas.core.Index` was broken (:issue:`22227`). - Bug in :class:`DataFrame` prevented list subclasses to be used to construction (:issue:`21226`) - Bug in :func:`DataFrame.unstack` and :func:`DataFrame.pivot_table` returning a missleading error message when the resulting DataFrame has more elements than int32 can handle. Now, the error message is improved, pointing towards the actual problem (:issue:`20601`) +- Bug in :func:`DataFrame.unstack` where a ``ValueError`` was raised when unstacking timezone aware values (:issue:`18338`) +- Bug in :func:`DataFrame.stack` where timezone aware values were converted to timezone naive values (:issue:`19420`) +- Bug in :func:`merge_asof` where a ``TypeError`` was raised when ``by_col`` were timezone aware values (:issue:`21184`) .. 
_whatsnew_0240.bug_fixes.sparse: diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index 362650714418f..f2f6944a21e03 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -936,3 +936,36 @@ def test_unstack_fill_frame_object(): index=list('xyz') ) assert_frame_equal(result, expected) + + +def test_unstack_timezone_aware_values(): + # GH 18338 + df = pd.DataFrame({ + 'timestamp': [ + pd.Timestamp('2017-08-27 01:00:00.709949+0000', tz='UTC')], + 'a': ['a'], + 'b': ['b'], + 'c': ['c'], + }, columns=['timestamp', 'a', 'b', 'c']) + result = df.set_index(['a', 'b']).unstack() + expected = pd.DataFrame([[pd.Timestamp('2017-08-27 01:00:00.709949+0000', + tz='UTC'), + 'c']], + index=pd.Index(['a'], name='a'), + columns=pd.MultiIndex( + levels=[['timestamp', 'c'], ['b']], + codes=[[0, 1], [0, 0]], + names=[None, 'b'])) + assert_frame_equal(result, expected) + + +def test_stack_timezone_aware_values(): + # GH 19420 + ts = pd.date_range(freq="D", start="20180101", end="20180103", + tz="America/New_York") + df = pd.DataFrame({"A": ts}, index=["a", "b", "c"]) + result = df.stack() + expected = pd.Series(ts, + index=pd.MultiIndex(levels=[['a', 'b', 'c'], ['A']], + codes=[[0, 1, 2], [0, 0, 0]])) + assert_series_equal(result, expected) diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py index c03b8afbe79bf..784d1ca6fb82c 100644 --- a/pandas/tests/indexes/datetimes/test_astype.py +++ b/pandas/tests/indexes/datetimes/test_astype.py @@ -293,6 +293,15 @@ def test_to_period_tz(self, tz): tm.assert_index_equal(result, expected) + @pytest.mark.parametrize('tz', ['Etc/GMT-1', 'Etc/GMT+1']) + def test_to_period_tz_utc_offset_consistency(self, tz): + # GH 22905 + ts = pd.date_range('1/1/2000', '2/1/2000', tz='Etc/GMT-1') + with tm.assert_produces_warning(UserWarning): + result = ts.to_period()[0] + expected = ts[0].to_period() + assert result == expected + def 
test_to_period_nofreq(self): idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04']) with pytest.raises(ValueError): diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index 1483654daa99e..1d1d7d48adaab 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -1022,3 +1022,17 @@ def test_merge_on_nans(self, func, side): merge_asof(df_null, df, on='a') else: merge_asof(df, df_null, on='a') + + def test_merge_by_col_tz_aware(self): + # GH 21184 + left = pd.DataFrame( + {'by_col': pd.DatetimeIndex(['2018-01-01']).tz_localize('UTC'), + 'on_col': [2], 'values': ['a']}) + right = pd.DataFrame( + {'by_col': pd.DatetimeIndex(['2018-01-01']).tz_localize('UTC'), + 'on_col': [1], 'values': ['b']}) + result = pd.merge_asof(left, right, by='by_col', on='on_col') + expected = pd.DataFrame([ + [pd.Timestamp('2018-01-01', tz='UTC'), 2, 'a', 'b'] + ], columns=['by_col', 'on_col', 'values_x', 'values_y']) + assert_frame_equal(result, expected)
- [x] closes #22905 - [x] closes #18338 - [x] closes #19420 - [x] closes #21184 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24634
2019-01-05T08:27:33Z
2019-01-06T15:53:49Z
2019-01-06T15:53:48Z
2019-01-06T18:04:29Z
DOC: update applymap docstring in pandas/core/frame.py
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a50def7357826..9c9720522189d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6501,6 +6501,14 @@ def applymap(self, func): -------- DataFrame.apply : Apply a function along input axis of DataFrame. + Notes + ----- + In the current implementation applymap calls `func` twice on the + first column/row to decide whether it can take a fast or slow + code path. This can lead to unexpected behavior if `func` has + side-effects, as they will take effect twice for the first + column/row. + Examples -------- >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
- [x] closes #24612 - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/24633
2019-01-05T04:02:53Z
2019-01-05T14:54:34Z
2019-01-05T14:54:34Z
2019-01-05T15:37:42Z
CI: unify environment creation
diff --git a/.travis.yml b/.travis.yml index 529f1221899dc..022e11b7db950 100644 --- a/.travis.yml +++ b/.travis.yml @@ -82,15 +82,10 @@ before_install: install: - echo "install start" - ci/prep_cython_cache.sh - - ci/install_travis.sh + - ci/setup_env.sh - ci/submit_cython_cache.sh - echo "install done" -before_script: - - ci/install_db_travis.sh - - export DISPLAY=":99.0" - - ci/before_script_travis.sh - script: - echo "script start" - source activate pandas-dev diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 9b1b17b453af3..eee38dadfab90 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -39,9 +39,8 @@ jobs: - script: | export PATH=$HOME/miniconda3/bin:$PATH sudo apt-get install -y libc6-dev-i386 - ci/incremental/install_miniconda.sh - ci/incremental/setup_conda_environment.sh - displayName: 'Set up environment' + ci/setup_env.sh + displayName: 'Setup environment and build pandas' condition: true # Do not require pandas @@ -59,13 +58,6 @@ jobs: displayName: 'Dependencies consistency' condition: true - - script: | - export PATH=$HOME/miniconda3/bin:$PATH - source activate pandas-dev - ci/incremental/build.sh - displayName: 'Build' - condition: true - # Require pandas - script: | export PATH=$HOME/miniconda3/bin:$PATH diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml index 7119054cf2f53..f53e284c221c6 100644 --- a/ci/azure/posix.yml +++ b/ci/azure/posix.yml @@ -50,17 +50,9 @@ jobs: steps: - script: | if [ "$(uname)" == "Linux" ]; then sudo apt-get install -y libc6-dev-i386 $EXTRA_APT; fi - echo "Installing Miniconda" - ci/incremental/install_miniconda.sh - export PATH=$HOME/miniconda3/bin:$PATH - echo "Setting up Conda environment" - ci/incremental/setup_conda_environment.sh - displayName: 'Before Install' - - script: | - export PATH=$HOME/miniconda3/bin:$PATH - source activate pandas-dev - ci/incremental/build.sh - displayName: 'Build' + echo "Creating Environment" + ci/setup_env.sh + displayName: 'Setup environment and build pandas' - 
script: | export PATH=$HOME/miniconda3/bin:$PATH source activate pandas-dev diff --git a/ci/before_script_travis.sh b/ci/before_script_travis.sh deleted file mode 100755 index 0b3939b1906a2..0000000000000 --- a/ci/before_script_travis.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -echo "inside $0" - -if [ "${TRAVIS_OS_NAME}" == "linux" ]; then - sh -e /etc/init.d/xvfb start - sleep 3 -fi - -# Never fail because bad things happened here. -true diff --git a/ci/incremental/build.sh b/ci/incremental/build.sh deleted file mode 100755 index 05648037935a3..0000000000000 --- a/ci/incremental/build.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# Make sure any error below is reported as such -set -v -e - -echo "[building extensions]" -python setup.py build_ext -q --inplace -python -m pip install -e . - -echo -echo "[show environment]" -conda list - -echo -echo "[done]" -exit 0 diff --git a/ci/incremental/install_miniconda.sh b/ci/incremental/install_miniconda.sh deleted file mode 100755 index a47dfdb324b34..0000000000000 --- a/ci/incremental/install_miniconda.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -set -v -e - -# Install Miniconda -unamestr=`uname` -if [[ "$unamestr" == 'Linux' ]]; then - if [[ "$BITS32" == "yes" ]]; then - wget -q https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86.sh -O miniconda.sh - else - wget -q https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh - fi -elif [[ "$unamestr" == 'Darwin' ]]; then - wget -q https://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda.sh -else - echo Error -fi -chmod +x miniconda.sh -./miniconda.sh -b diff --git a/ci/incremental/setup_conda_environment.sh b/ci/incremental/setup_conda_environment.sh deleted file mode 100755 index f174c17a614d8..0000000000000 --- a/ci/incremental/setup_conda_environment.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash - -set -v -e - -CONDA_INSTALL="conda install -q -y" -PIP_INSTALL="pip install -q" - - -# 
Deactivate any environment -source deactivate -# Display root environment (for debugging) -conda list -# Clean up any left-over from a previous build -# (note workaround for https://github.com/conda/conda/issues/2679: -# `conda env remove` issue) -conda remove --all -q -y -n pandas-dev - -echo -echo "[create env]" -time conda env create -q --file="${ENV_FILE}" || exit 1 - -set +v -source activate pandas-dev -set -v - -# remove any installed pandas package -# w/o removing anything else -echo -echo "[removing installed pandas]" -conda remove pandas -y --force || true -pip uninstall -y pandas || true - -echo -echo "[no installed pandas]" -conda list pandas - -if [ -n "$LOCALE_OVERRIDE" ]; then - sudo locale-gen "$LOCALE_OVERRIDE" -fi - -# # Install the compiler toolchain -# if [[ $(uname) == Linux ]]; then -# if [[ "$CONDA_SUBDIR" == "linux-32" || "$BITS32" == "yes" ]] ; then -# $CONDA_INSTALL gcc_linux-32 gxx_linux-32 -# else -# $CONDA_INSTALL gcc_linux-64 gxx_linux-64 -# fi -# elif [[ $(uname) == Darwin ]]; then -# $CONDA_INSTALL clang_osx-64 clangxx_osx-64 -# # Install llvm-openmp and intel-openmp on OSX too -# $CONDA_INSTALL llvm-openmp intel-openmp -# fi diff --git a/ci/install_db_travis.sh b/ci/install_db_travis.sh deleted file mode 100755 index e4e6d7a5a9b85..0000000000000 --- a/ci/install_db_travis.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -if [ "${TRAVIS_OS_NAME}" != "linux" ]; then - echo "not using dbs on non-linux" - exit 0 -fi - -echo "installing dbs" -mysql -e 'create database pandas_nosetest;' -psql -c 'create database pandas_nosetest;' -U postgres - -echo "done" -exit 0 diff --git a/ci/install_travis.sh b/ci/install_travis.sh deleted file mode 100755 index d1a940f119228..0000000000000 --- a/ci/install_travis.sh +++ /dev/null @@ -1,109 +0,0 @@ -#!/bin/bash - -# edit the locale file if needed -function edit_init() -{ - if [ -n "$LOCALE_OVERRIDE" ]; then - echo "[Adding locale to the first line of pandas/__init__.py]" - rm -f pandas/__init__.pyc - 
sedc="3iimport locale\nlocale.setlocale(locale.LC_ALL, '$LOCALE_OVERRIDE')\n" - sed -i "$sedc" pandas/__init__.py - echo "[head -4 pandas/__init__.py]" - head -4 pandas/__init__.py - echo - fi -} - -echo -echo "[install_travis]" -edit_init - -home_dir=$(pwd) -echo -echo "[home_dir]: $home_dir" - -# install miniconda -MINICONDA_DIR="$HOME/miniconda3" - -echo -echo "[Using clean Miniconda install]" - -if [ -d "$MINICONDA_DIR" ]; then - rm -rf "$MINICONDA_DIR" -fi - -# install miniconda -if [ "${TRAVIS_OS_NAME}" == "osx" ]; then - time wget http://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -q -O miniconda.sh || exit 1 -else - time wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -q -O miniconda.sh || exit 1 -fi -time bash miniconda.sh -b -p "$MINICONDA_DIR" || exit 1 - -echo -echo "[show conda]" -which conda - -echo -echo "[update conda]" -conda config --set ssl_verify false || exit 1 -conda config --set quiet true --set always_yes true --set changeps1 false || exit 1 -conda update -q conda - -# Useful for debugging any issues with conda -conda info -a || exit 1 - -# set the compiler cache to work -echo -if [ -z "$NOCACHE" ] && [ "${TRAVIS_OS_NAME}" == "linux" ]; then - echo "[Using ccache]" - export PATH=/usr/lib/ccache:/usr/lib64/ccache:$PATH - gcc=$(which gcc) - echo "[gcc]: $gcc" - ccache=$(which ccache) - echo "[ccache]: $ccache" - export CC='ccache gcc' -elif [ -z "$NOCACHE" ] && [ "${TRAVIS_OS_NAME}" == "osx" ]; then - echo "[Install ccache]" - brew install ccache > /dev/null 2>&1 - echo "[Using ccache]" - export PATH=/usr/local/opt/ccache/libexec:$PATH - gcc=$(which gcc) - echo "[gcc]: $gcc" - ccache=$(which ccache) - echo "[ccache]: $ccache" -else - echo "[Not using ccache]" -fi - -echo -echo "[create env]" - -# create our environment -time conda env create -q --file="${ENV_FILE}" || exit 1 - -source activate pandas-dev - -# remove any installed pandas package -# w/o removing anything else -echo -echo "[removing 
installed pandas]" -conda remove pandas -y --force -pip uninstall -y pandas - -echo -echo "[no installed pandas]" -conda list pandas -pip list --format columns |grep pandas - -# build and install -echo "[running setup.py develop]" -python setup.py develop || exit 1 - -echo -echo "[show environment]" -conda list - -echo -echo "[done]" -exit 0 diff --git a/ci/setup_env.sh b/ci/setup_env.sh new file mode 100755 index 0000000000000..414a5c8705ee9 --- /dev/null +++ b/ci/setup_env.sh @@ -0,0 +1,135 @@ +#!/bin/bash -e + + +# edit the locale file if needed +if [ -n "$LOCALE_OVERRIDE" ]; then + echo "Adding locale to the first line of pandas/__init__.py" + rm -f pandas/__init__.pyc + SEDC="3iimport locale\nlocale.setlocale(locale.LC_ALL, '$LOCALE_OVERRIDE')\n" + sed -i "$SEDC" pandas/__init__.py + echo "[head -4 pandas/__init__.py]" + head -4 pandas/__init__.py + echo + sudo locale-gen "$LOCALE_OVERRIDE" +fi + +MINICONDA_DIR="$HOME/miniconda3" + + +if [ -d "$MINICONDA_DIR" ]; then + echo + echo "rm -rf "$MINICONDA_DIR"" + rm -rf "$MINICONDA_DIR" +fi + +echo "Install Miniconda" +UNAME_OS=$(uname) +if [[ "$UNAME_OS" == 'Linux' ]]; then + if [[ "$BITS32" == "yes" ]]; then + CONDA_OS="Linux-x86" + else + CONDA_OS="Linux-x86_64" + fi +elif [[ "$UNAME_OS" == 'Darwin' ]]; then + CONDA_OS="MacOSX-x86_64" +else + echo "OS $UNAME_OS not supported" + exit 1 +fi + +wget -q "https://repo.continuum.io/miniconda/Miniconda3-latest-$CONDA_OS.sh" -O miniconda.sh +chmod +x miniconda.sh +./miniconda.sh -b + +export PATH=$MINICONDA_DIR/bin:$PATH + +echo +echo "which conda" +which conda + +echo +echo "update conda" +conda config --set ssl_verify false +conda config --set quiet true --set always_yes true --set changeps1 false +conda update -n base conda + +echo "conda info -a" +conda info -a + +echo +echo "set the compiler cache to work" +if [ -z "$NOCACHE" ] && [ "${TRAVIS_OS_NAME}" == "linux" ]; then + echo "Using ccache" + export PATH=/usr/lib/ccache:/usr/lib64/ccache:$PATH + GCC=$(which gcc) 
+ echo "gcc: $GCC" + CCACHE=$(which ccache) + echo "ccache: $CCACHE" + export CC='ccache gcc' +elif [ -z "$NOCACHE" ] && [ "${TRAVIS_OS_NAME}" == "osx" ]; then + echo "Install ccache" + brew install ccache > /dev/null 2>&1 + echo "Using ccache" + export PATH=/usr/local/opt/ccache/libexec:$PATH + gcc=$(which gcc) + echo "gcc: $gcc" + CCACHE=$(which ccache) + echo "ccache: $CCACHE" +else + echo "Not using ccache" +fi + +echo "source deactivate" +source deactivate + +echo "conda list (root environment)" +conda list + +# Clean up any left-over from a previous build +# (note workaround for https://github.com/conda/conda/issues/2679: +# `conda env remove` issue) +conda remove --all -q -y -n pandas-dev + +echo +echo "conda env create -q --file=${ENV_FILE}" +time conda env create -q --file="${ENV_FILE}" + +echo "activate pandas-dev" +source activate pandas-dev + +echo +echo "remove any installed pandas package" +echo "w/o removing anything else" +conda remove pandas -y --force || true +pip uninstall -y pandas || true + +echo +echo "conda list pandas" +conda list pandas + +# Make sure any error below is reported as such + +echo "Build extensions and install pandas" +python setup.py build_ext -q --inplace +python -m pip install -e . + +echo +echo "conda list" +conda list + +# Install DB for Linux +export DISPLAY=":99." +if [ ${TRAVIS_OS_NAME} == "linux" ]; then + echo "installing dbs" + mysql -e 'create database pandas_nosetest;' + psql -c 'create database pandas_nosetest;' -U postgres + + echo + echo "sh -e /etc/init.d/xvfb start" + sh -e /etc/init.d/xvfb start + sleep 3 +else + echo "not using dbs on non-linux" +fi + +echo "done"
closes #24498 closes #23923
https://api.github.com/repos/pandas-dev/pandas/pulls/24632
2019-01-05T03:58:29Z
2019-04-05T00:42:58Z
2019-04-05T00:42:57Z
2019-04-15T18:30:45Z
DOC: Improve DataFrame.align docstring
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d0555bd2e44b1..a8b90e413ebad 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -8345,8 +8345,17 @@ def ranker(data): fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value - method : str, default None + method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None + Method to use for filling holes in reindexed Series + pad / ffill: propagate last valid observation forward to next valid + backfill / bfill: use NEXT valid observation to fill gap limit : int, default None + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill. In other words, if there is + a gap with more than this number of consecutive NaNs, it will only + be partially filled. If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. Must be greater than 0 if not None. fill_axis : %(axes_single_arg)s, default 0 Filling axis, method and limit broadcast_axis : %(axes_single_arg)s, default None
The docstring for `DataFrame.align` currently has no descriptions for the `method` or `limit` arguments (which both refer to the same arguments of `fillna`), so this PR adds those descriptions. https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.align.html
https://api.github.com/repos/pandas-dev/pandas/pulls/24631
2019-01-05T03:37:42Z
2019-01-05T14:54:06Z
2019-01-05T14:54:06Z
2019-01-05T15:08:15Z
Have Categorical ops defer to DataFrame; broken off of #24282
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 3be87c4cabaf0..09dd857182592 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1353,6 +1353,7 @@ Categorical - Bug in many methods of the ``.str``-accessor, which always failed on calling the ``CategoricalIndex.str`` constructor (:issue:`23555`, :issue:`23556`) - Bug in :meth:`Series.where` losing the categorical dtype for categorical data (:issue:`24077`) - Bug in :meth:`Categorical.apply` where ``NaN`` values could be handled unpredictably. They now remain unchanged (:issue:`24241`) +- Bug in :class:`Categorical` comparison methods incorrectly raising ``ValueError`` when operating against a :class:`DataFrame` (:issue:`24630`) Datetimelike ^^^^^^^^^^^^ diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 969add2d3efef..ceab3d0f53a3b 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -23,7 +23,7 @@ is_timedelta64_dtype) from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.generic import ( - ABCCategoricalIndex, ABCIndexClass, ABCSeries) + ABCCategoricalIndex, ABCDataFrame, ABCIndexClass, ABCSeries) from pandas.core.dtypes.inference import is_hashable from pandas.core.dtypes.missing import isna, notna @@ -59,9 +59,11 @@ def f(self, other): # results depending whether categories are the same or not is kind of # insane, so be a bit stricter here and use the python3 idea of # comparing only things of equal type. 
- if isinstance(other, ABCSeries): + if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): return NotImplemented + other = lib.item_from_zerodim(other) + if not self.ordered: if op in ['__lt__', '__gt__', '__le__', '__ge__']: raise TypeError("Unordered Categoricals can only compare " @@ -105,7 +107,6 @@ def f(self, other): # # With cat[0], for example, being ``np.int64(1)`` by the time it gets # into this function would become ``np.array(1)``. - other = lib.item_from_zerodim(other) if is_scalar(other): if other in self.categories: i = self.categories.get_loc(other) diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py index 9304df58bba95..b2965bbcc456a 100644 --- a/pandas/tests/arrays/categorical/test_operators.py +++ b/pandas/tests/arrays/categorical/test_operators.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import operator import numpy as np import pytest @@ -113,9 +114,34 @@ def test_comparisons(self): res = cat_rev > "b" tm.assert_numpy_array_equal(res, exp) + # check that zero-dim array gets unboxed + res = cat_rev > np.array("b") + tm.assert_numpy_array_equal(res, exp) + class TestCategoricalOps(object): + def test_compare_frame(self): + # GH#24282 check that Categorical.__cmp__(DataFrame) defers to frame + data = ["a", "b", 2, "a"] + cat = Categorical(data) + + df = DataFrame(cat) + + for op in [operator.eq, operator.ne, operator.ge, + operator.gt, operator.le, operator.lt]: + with pytest.raises(ValueError): + # alignment raises unless we transpose + op(cat, df) + + result = cat == df.T + expected = DataFrame([[True, True, True, True]]) + tm.assert_frame_equal(result, expected) + + result = cat[::-1] != df.T + expected = DataFrame([[False, True, True, False]]) + tm.assert_frame_equal(result, expected) + def test_datetime_categorical_comparison(self): dt_cat = Categorical(date_range('2014-01-01', periods=3), ordered=True) tm.assert_numpy_array_equal(dt_cat > dt_cat[0],
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24630
2019-01-04T23:54:14Z
2019-01-05T14:53:45Z
2019-01-05T14:53:45Z
2019-01-05T15:38:52Z
CLN: replace lambdas with named functions so they are labeled in asv
diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py index 198ed1c90a2e9..7c78fe7e7a177 100644 --- a/asv_bench/benchmarks/ctors.py +++ b/asv_bench/benchmarks/ctors.py @@ -3,17 +3,55 @@ from pandas import Series, Index, DatetimeIndex, Timestamp, MultiIndex +def no_change(arr): + return arr + + +def list_of_str(arr): + return list(arr.astype(str)) + + +def gen_of_str(arr): + return (x for x in arr.astype(str)) + + +def arr_dict(arr): + return dict(zip(range(len(arr)), arr)) + + +def list_of_tuples(arr): + return [(i, -i) for i in arr] + + +def gen_of_tuples(arr): + return ((i, -i) for i in arr) + + +def list_of_lists(arr): + return [[i, -i] for i in arr] + + +def list_of_tuples_with_none(arr): + return [(i, -i) for i in arr][:-1] + [None] + + +def list_of_lists_with_none(arr): + return [[i, -i] for i in arr][:-1] + [None] + + class SeriesConstructors(object): param_names = ["data_fmt", "with_index"] - params = [[lambda x: x, + params = [[no_change, list, - lambda arr: list(arr.astype(str)), - lambda arr: dict(zip(range(len(arr)), arr)), - lambda arr: [(i, -i) for i in arr], - lambda arr: [[i, -i] for i in arr], - lambda arr: ([(i, -i) for i in arr][:-1] + [None]), - lambda arr: ([[i, -i] for i in arr][:-1] + [None])], + list_of_str, + gen_of_str, + arr_dict, + list_of_tuples, + gen_of_tuples, + list_of_lists, + list_of_tuples_with_none, + list_of_lists_with_none], [False, True]] def setup(self, data_fmt, with_index):
This complements a PR in `asv` (https://github.com/airspeed-velocity/asv/pull/771), but would be worthwhile on its own. `asv` compares benchmarks based on the `repr()` of the parameters, which causes issues when the parameter is an object that doesn't override the default `repr()` (such as functions, especially lambda functions): ``` Benchmarks that have stayed the same: before after ratio [24ab22f7] [d7cef344] n/a 997±3μs n/a ctors.SeriesConstructors.time_series_constructor(<function SeriesConstructors.<lambda> at 0x7f0deb0800d0>, False) n/a 1.05±0ms n/a ctors.SeriesConstructors.time_series_constructor(<function SeriesConstructors.<lambda> at 0x7f0deb0800d0>, True) 1.04±0.01ms n/a n/a ctors.SeriesConstructors.time_series_constructor(<function SeriesConstructors.<lambda> at 0x7fd9d0bbc598>, False) 1.06±0.02ms n/a n/a ctors.SeriesConstructors.time_series_constructor(<function SeriesConstructors.<lambda> at 0x7fd9d0bbc598>, True) ``` As the memory address component of the name changes between runs, `asv` fails at lining up the comparison. This has to be fixed upstream. However, passing `lambda` functions is particularly unhelpful, as we have no idea which of the six such objects are represented in the example above. This PR simply gives each function a descriptive name: ``` asv continuous -b SeriesConstructors v0.20.0..HEAD [...] 
[ 50.00%] · For pandas commit 20c2a780 <asv_change> (round 2/2): [ 50.00%] ·· Benchmarking conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt [ 75.00%] ··· ctors.SeriesConstructors.time_series_constructor ok [ 75.00%] ··· ======================================================= ============= ============= -- with_index ------------------------------------------------------- --------------------------- data_fmt False True ======================================================= ============= ============= <function no_change at 0x7efcd7b67c80> 74.0±0.4μs 131±3μs list 1.15±0.03ms 1.20±0.01ms <function list_of_str at 0x7efcd7b67d08> 688±9μs 741±6μs <function arr_dict at 0x7efcd7b67b70> 4.49±0.01ms 4.71±0.02ms <function list_of_tuples at 0x7efcd7b679d8> 1.02±0.01ms 1.10±0.01ms <function list_of_lists at 0x7efcd7b67a60> 1.04±0.03ms 1.09±0.04ms <function list_of_tuples_with_none at 0x7efcd7b67ae8> 1.02±0.02ms 1.08±0.02ms <function list_of_lists_with_none at 0x7efcd7b676a8> 1.05±0.01ms 1.10±0.01ms ======================================================= ============= ============= [ 75.00%] · For pandas commit 91753873 <maybe_convert_objects_int_overflow_fix~1> (round 2/2): [ 75.00%] ·· Building for conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt..... 
[ 75.00%] ·· Benchmarking conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt [100.00%] ··· ctors.SeriesConstructors.time_series_constructor ok [100.00%] ··· ======================================================= ============= ============= -- with_index ------------------------------------------------------- --------------------------- data_fmt False True ======================================================= ============= ============= <function no_change at 0x7efcd7b67c80> 72.9±0.9μs 126±1μs list 1.10±0ms 1.16±0.02ms <function list_of_str at 0x7efcd7b67d08> 699±10μs 760±3μs <function arr_dict at 0x7efcd7b67b70> 4.54±0.08ms 4.71±0.04ms <function list_of_tuples at 0x7efcd7b679d8> 1.01±0.01ms 1.08±0.02ms <function list_of_lists at 0x7efcd7b67a60> 1.01±0.01ms 1.07±0.01ms <function list_of_tuples_with_none at 0x7efcd7b67ae8> 1.02±0.01ms 1.09±0.02ms <function list_of_lists_with_none at 0x7efcd7b676a8> 1.05±0ms 1.08±0.01ms ======================================================= ============= ============= BENCHMARKS NOT SIGNIFICANTLY CHANGED. ``` - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24629
2019-01-04T22:37:20Z
2019-01-05T14:53:23Z
2019-01-05T14:53:22Z
2019-01-05T14:53:25Z
catch complex nan in util.is_nan, de-dup+optimize libmissing, tests
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx index e922a5d1c3b27..229edbac4992d 100644 --- a/pandas/_libs/missing.pyx +++ b/pandas/_libs/missing.pyx @@ -12,7 +12,9 @@ cimport pandas._libs.util as util from pandas._libs.tslibs.np_datetime cimport ( get_timedelta64_value, get_datetime64_value) -from pandas._libs.tslibs.nattype cimport checknull_with_nat, c_NaT +from pandas._libs.tslibs.nattype cimport ( + checknull_with_nat, c_NaT as NaT, is_null_datetimelike) + cdef float64_t INF = <float64_t>np.inf cdef float64_t NEGINF = -INF @@ -20,25 +22,6 @@ cdef float64_t NEGINF = -INF cdef int64_t NPY_NAT = util.get_nat() -cdef inline bint _check_all_nulls(object val): - """ utility to check if a value is any type of null """ - res: bint - - if isinstance(val, (float, complex)): - res = val != val - elif val is c_NaT: - res = 1 - elif val is None: - res = 1 - elif util.is_datetime64_object(val): - res = get_datetime64_value(val) == NPY_NAT - elif util.is_timedelta64_object(val): - res = get_timedelta64_value(val) == NPY_NAT - else: - res = 0 - return res - - cpdef bint checknull(object val): """ Return boolean describing of the input is NA-like, defined here as any @@ -62,18 +45,7 @@ cpdef bint checknull(object val): The difference between `checknull` and `checknull_old` is that `checknull` does *not* consider INF or NEGINF to be NA. 
""" - if util.is_float_object(val) or util.is_complex_object(val): - return val != val # and val != INF and val != NEGINF - elif util.is_datetime64_object(val): - return get_datetime64_value(val) == NPY_NAT - elif val is c_NaT: - return True - elif util.is_timedelta64_object(val): - return get_timedelta64_value(val) == NPY_NAT - elif util.is_array(val): - return False - else: - return val is None or util.is_nan(val) + return is_null_datetimelike(val, inat_is_null=False) cpdef bint checknull_old(object val): @@ -101,18 +73,11 @@ cpdef bint checknull_old(object val): The difference between `checknull` and `checknull_old` is that `checknull` does *not* consider INF or NEGINF to be NA. """ - if util.is_float_object(val) or util.is_complex_object(val): - return val != val or val == INF or val == NEGINF - elif util.is_datetime64_object(val): - return get_datetime64_value(val) == NPY_NAT - elif val is c_NaT: + if checknull(val): return True - elif util.is_timedelta64_object(val): - return get_timedelta64_value(val) == NPY_NAT - elif util.is_array(val): - return False - else: - return val is None or util.is_nan(val) + elif util.is_float_object(val) or util.is_complex_object(val): + return val == INF or val == NEGINF + return False cdef inline bint _check_none_nan_inf_neginf(object val): @@ -128,7 +93,7 @@ cdef inline bint _check_none_nan_inf_neginf(object val): cpdef ndarray[uint8_t] isnaobj(ndarray arr): """ Return boolean mask denoting which elements of a 1-D array are na-like, - according to the criteria defined in `_check_all_nulls`: + according to the criteria defined in `checknull`: - None - nan - NaT @@ -154,7 +119,7 @@ cpdef ndarray[uint8_t] isnaobj(ndarray arr): result = np.empty(n, dtype=np.uint8) for i in range(n): val = arr[i] - result[i] = _check_all_nulls(val) + result[i] = checknull(val) return result.view(np.bool_) @@ -189,7 +154,7 @@ def isnaobj_old(ndarray arr): result = np.zeros(n, dtype=np.uint8) for i in range(n): val = arr[i] - result[i] = val is 
c_NaT or _check_none_nan_inf_neginf(val) + result[i] = val is NaT or _check_none_nan_inf_neginf(val) return result.view(np.bool_) @@ -299,7 +264,7 @@ cdef inline bint is_null_datetime64(v): if checknull_with_nat(v): return True elif util.is_datetime64_object(v): - return v.view('int64') == NPY_NAT + return get_datetime64_value(v) == NPY_NAT return False @@ -309,7 +274,7 @@ cdef inline bint is_null_timedelta64(v): if checknull_with_nat(v): return True elif util.is_timedelta64_object(v): - return v.view('int64') == NPY_NAT + return get_timedelta64_value(v) == NPY_NAT return False diff --git a/pandas/_libs/tslibs/nattype.pxd b/pandas/_libs/tslibs/nattype.pxd index ee8d5ca3d861c..dae5bdc3f93b1 100644 --- a/pandas/_libs/tslibs/nattype.pxd +++ b/pandas/_libs/tslibs/nattype.pxd @@ -17,4 +17,4 @@ cdef _NaT c_NaT cdef bint checknull_with_nat(object val) -cpdef bint is_null_datetimelike(object val) +cpdef bint is_null_datetimelike(object val, bint inat_is_null=*) diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index df083f27ad653..a55d15a7c4e85 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -14,6 +14,8 @@ cimport numpy as cnp from numpy cimport int64_t cnp.import_array() +from pandas._libs.tslibs.np_datetime cimport ( + get_datetime64_value, get_timedelta64_value) cimport pandas._libs.tslibs.util as util from pandas._libs.tslibs.util cimport ( get_nat, is_integer_object, is_float_object, is_datetime64_object, @@ -686,26 +688,30 @@ cdef inline bint checknull_with_nat(object val): return val is None or util.is_nan(val) or val is c_NaT -cpdef bint is_null_datetimelike(object val): +cpdef bint is_null_datetimelike(object val, bint inat_is_null=True): """ Determine if we have a null for a timedelta/datetime (or integer versions) Parameters ---------- val : object + inat_is_null : bool, default True + Whether to treat integer iNaT value as null Returns ------- null_datetimelike : bool """ - if val is None or 
util.is_nan(val): + if val is None: return True elif val is c_NaT: return True + elif util.is_float_object(val) or util.is_complex_object(val): + return val != val elif util.is_timedelta64_object(val): - return val.view('int64') == NPY_NAT + return get_timedelta64_value(val) == NPY_NAT elif util.is_datetime64_object(val): - return val.view('int64') == NPY_NAT - elif util.is_integer_object(val): + return get_datetime64_value(val) == NPY_NAT + elif inat_is_null and util.is_integer_object(val): return val == NPY_NAT return False diff --git a/pandas/_libs/tslibs/util.pxd b/pandas/_libs/tslibs/util.pxd index 0ba61fcc58f46..ef7065a44f18b 100644 --- a/pandas/_libs/tslibs/util.pxd +++ b/pandas/_libs/tslibs/util.pxd @@ -215,7 +215,8 @@ cdef inline bint is_offset_object(object val): cdef inline bint is_nan(object val): """ - Check if val is a Not-A-Number float, including float('NaN') and np.nan. + Check if val is a Not-A-Number float or complex, including + float('NaN') and np.nan. Parameters ---------- @@ -225,4 +226,4 @@ cdef inline bint is_nan(object val): ------- is_nan : bool """ - return is_float_object(val) and val != val + return (is_float_object(val) or is_complex_object(val)) and val != val diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index 965e5e000d026..d913d2ad299ce 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -1,13 +1,14 @@ # -*- coding: utf-8 -*- from datetime import datetime +from decimal import Decimal from warnings import catch_warnings, filterwarnings, simplefilter import numpy as np import pytest from pandas._libs import missing as libmissing -from pandas._libs.tslib import iNaT +from pandas._libs.tslibs import iNaT, is_null_datetimelike from pandas.compat import u from pandas.core.dtypes.common import is_scalar @@ -392,3 +393,106 @@ def test_empty_like(self): expected = np.array([True]) self._check_behavior(arr, expected) + + +m8_units = ['as', 'ps', 'ns', 'us', 
'ms', 's', + 'm', 'h', 'D', 'W', 'M', 'Y'] + +na_vals = [ + None, + NaT, + float('NaN'), + complex('NaN'), + np.nan, + np.float64('NaN'), + np.float32('NaN'), + np.complex64(np.nan), + np.complex128(np.nan), + np.datetime64('NaT'), + np.timedelta64('NaT'), +] + [ + np.datetime64('NaT', unit) for unit in m8_units +] + [ + np.timedelta64('NaT', unit) for unit in m8_units +] + +inf_vals = [ + float('inf'), + float('-inf'), + complex('inf'), + complex('-inf'), + np.inf, + np.NINF, +] + +int_na_vals = [ + # Values that match iNaT, which we treat as null in specific cases + np.int64(NaT.value), + int(NaT.value), +] + +sometimes_na_vals = [ + Decimal('NaN'), +] + +never_na_vals = [ + # float/complex values that when viewed as int64 match iNaT + -0.0, + np.float64('-0.0'), + -0j, + np.complex64(-0j), +] + + +class TestLibMissing(object): + def test_checknull(self): + for value in na_vals: + assert libmissing.checknull(value) + + for value in inf_vals: + assert not libmissing.checknull(value) + + for value in int_na_vals: + assert not libmissing.checknull(value) + + for value in sometimes_na_vals: + assert not libmissing.checknull(value) + + for value in never_na_vals: + assert not libmissing.checknull(value) + + def checknull_old(self): + for value in na_vals: + assert libmissing.checknull_old(value) + + for value in inf_vals: + assert libmissing.checknull_old(value) + + for value in int_na_vals: + assert not libmissing.checknull_old(value) + + for value in sometimes_na_vals: + assert not libmissing.checknull_old(value) + + for value in never_na_vals: + assert not libmissing.checknull_old(value) + + def test_is_null_datetimelike(self): + for value in na_vals: + assert is_null_datetimelike(value) + assert is_null_datetimelike(value, False) + + for value in inf_vals: + assert not is_null_datetimelike(value) + assert not is_null_datetimelike(value, False) + + for value in int_na_vals: + assert is_null_datetimelike(value) + assert not is_null_datetimelike(value, False) + + for 
value in sometimes_na_vals: + assert not is_null_datetimelike(value) + assert not is_null_datetimelike(value, False) + + for value in never_na_vals: + assert not is_null_datetimelike(value)
gets rid of is_null_datelike_scalar, which would, among other things, treat `np.float64('-0.0')` as `iNaT`. Some overlap with #24619, merge conflicts should be small or zero. - [x] closes #24607 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24628
2019-01-04T22:33:26Z
2019-01-05T17:52:55Z
2019-01-05T17:52:55Z
2019-01-05T17:58:51Z
Array api docs
diff --git a/doc/source/10min.rst b/doc/source/10min.rst index a7557e6e1d1c2..972b562cfebba 100644 --- a/doc/source/10min.rst +++ b/doc/source/10min.rst @@ -657,7 +657,7 @@ Categoricals ------------ pandas can include categorical data in a ``DataFrame``. For full docs, see the -:ref:`categorical introduction <categorical>` and the :ref:`API documentation <api.categorical>`. +:ref:`categorical introduction <categorical>` and the :ref:`API documentation <api.arrays.categorical>`. .. ipython:: python diff --git a/doc/source/api/arrays.rst b/doc/source/api/arrays.rst new file mode 100644 index 0000000000000..d8ce2ab7bf73e --- /dev/null +++ b/doc/source/api/arrays.rst @@ -0,0 +1,401 @@ +{{ header }} + +.. _api.arrays: + +============= +Pandas Arrays +============= + +.. currentmodule:: pandas + +For most data types, pandas uses NumPy arrays as the concrete +objects contained with a :class:`Index`, :class:`Series`, or +:class:`DataFrame`. + +For some data types, pandas extends NumPy's type system. + +=================== ========================= ================== ============================= +Kind of Data Pandas Data Type Scalar Array +=================== ========================= ================== ============================= +TZ-aware datetime :class:`DatetimeTZDtype` :class:`Timestamp` :ref:`api.arrays.datetime` +Timedeltas (none) :class:`Timedelta` :ref:`api.arrays.timedelta` +Period (time spans) :class:`PeriodDtype` :class:`Period` :ref:`api.arrays.period` +Intervals :class:`IntervalDtype` :class:`Interval` :ref:`api.arrays.interval` +Nullable Integer :class:`Int64Dtype`, ... (none) :ref:`api.arrays.integer_na` +Categorical :class:`CategoricalDtype` (none) :ref:`api.arrays.categorical` +Sparse :class:`SparseDtype` (none) :ref:`api.arrays.sparse` +=================== ========================= ================== ============================= + +Pandas and third-party libraries can extend NumPy's type system (see :ref:`extending.extension-types`). 
+The top-level :meth:`array` method can be used to create a new array, which may be +stored in a :class:`Series`, :class:`Index`, or as a column in a :class:`DataFrame`. + +.. autosummary:: + :toctree: generated/ + + array + +.. _api.arrays.datetime: + +Datetime Data +------------- + +NumPy cannot natively represent timezone-aware datetimes. Pandas supports this +with the :class:`arrays.DatetimeArray` extension array, which can hold timezone-naive +or timezone-aware values. + +:class:`Timestamp`, a subclass of :class:`datetime.datetime`, is pandas' +scalar type for timezone-naive or timezone-aware datetime data. + +.. autosummary:: + :toctree: generated/ + + Timestamp + +Properties +~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + Timestamp.asm8 + Timestamp.day + Timestamp.dayofweek + Timestamp.dayofyear + Timestamp.days_in_month + Timestamp.daysinmonth + Timestamp.fold + Timestamp.hour + Timestamp.is_leap_year + Timestamp.is_month_end + Timestamp.is_month_start + Timestamp.is_quarter_end + Timestamp.is_quarter_start + Timestamp.is_year_end + Timestamp.is_year_start + Timestamp.max + Timestamp.microsecond + Timestamp.min + Timestamp.minute + Timestamp.month + Timestamp.nanosecond + Timestamp.quarter + Timestamp.resolution + Timestamp.second + Timestamp.tz + Timestamp.tzinfo + Timestamp.value + Timestamp.week + Timestamp.weekofyear + Timestamp.year + +Methods +~~~~~~~ +.. 
autosummary:: + :toctree: generated/ + + Timestamp.astimezone + Timestamp.ceil + Timestamp.combine + Timestamp.ctime + Timestamp.date + Timestamp.day_name + Timestamp.dst + Timestamp.floor + Timestamp.freq + Timestamp.freqstr + Timestamp.fromordinal + Timestamp.fromtimestamp + Timestamp.isocalendar + Timestamp.isoformat + Timestamp.isoweekday + Timestamp.month_name + Timestamp.normalize + Timestamp.now + Timestamp.replace + Timestamp.round + Timestamp.strftime + Timestamp.strptime + Timestamp.time + Timestamp.timestamp + Timestamp.timetuple + Timestamp.timetz + Timestamp.to_datetime64 + Timestamp.to_julian_date + Timestamp.to_period + Timestamp.to_pydatetime + Timestamp.today + Timestamp.toordinal + Timestamp.tz_convert + Timestamp.tz_localize + Timestamp.tzname + Timestamp.utcfromtimestamp + Timestamp.utcnow + Timestamp.utcoffset + Timestamp.utctimetuple + Timestamp.weekday + +A collection of timestamps may be stored in a :class:`arrays.DatetimeArray`. +For timezone-aware data, the ``.dtype`` of a ``DatetimeArray`` is a +:class:`DatetimeTZDtype`. For timezone-naive data, ``np.dtype("datetime64[ns]")`` +is used. + +If the data are tz-aware, then every value in the array must have the same timezone. + +.. autosummary:: + :toctree: generated/ + + arrays.DatetimeArray + DatetimeTZDtype + +.. _api.arrays.timedelta: + +Timedelta Data +-------------- + +NumPy can natively represent timedeltas. Pandas provides :class:`Timedelta` +for symmetry with :class:`Timestamp`. + +.. autosummary:: + :toctree: generated/ + + Timedelta + +Properties +~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + Timedelta.asm8 + Timedelta.components + Timedelta.days + Timedelta.delta + Timedelta.freq + Timedelta.is_populated + Timedelta.max + Timedelta.microseconds + Timedelta.min + Timedelta.nanoseconds + Timedelta.resolution + Timedelta.seconds + Timedelta.value + Timedelta.view + +Methods +~~~~~~~ +.. 
autosummary:: + :toctree: generated/ + + Timedelta.ceil + Timedelta.floor + Timedelta.isoformat + Timedelta.round + Timedelta.to_pytimedelta + Timedelta.to_timedelta64 + Timedelta.total_seconds + +A collection of timedeltas may be stored in a :class:`TimedeltaArray`. + +.. autosumarry:: + :toctree: generated/ + + arrays.TimedeltaArray + +.. _api.arrays.period: + +Timespan Data +------------- + +Pandas represents spans of times as :class:`Period` objects. + +Period +------ +.. autosummary:: + :toctree: generated/ + + Period + +Properties +~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + Period.day + Period.dayofweek + Period.dayofyear + Period.days_in_month + Period.daysinmonth + Period.end_time + Period.freq + Period.freqstr + Period.hour + Period.is_leap_year + Period.minute + Period.month + Period.ordinal + Period.quarter + Period.qyear + Period.second + Period.start_time + Period.week + Period.weekday + Period.weekofyear + Period.year + +Methods +~~~~~~~ +.. autosummary:: + :toctree: generated/ + + Period.asfreq + Period.now + Period.strftime + Period.to_timestamp + +A collection of timedeltas may be stored in a :class:`arrays.PeriodArray`. +Every period in a ``PeriodArray`` must have the same ``freq``. + +.. autosummary:: + :toctree: generated/ + + arrays.DatetimeArray + PeriodDtype + +.. _api.arrays.interval: + +Interval Data +------------- + +Arbitrary intervals can be represented as :class:`Interval` objects. + +.. autosummary:: + :toctree: generated/ + + Interval + +Properties +~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + Interval.closed + Interval.closed_left + Interval.closed_right + Interval.left + Interval.length + Interval.mid + Interval.open_left + Interval.open_right + Interval.overlaps + Interval.right + +A collection of intervals may be stored in an :class:`IntervalArray`. + +.. autosummary:: + :toctree: generated/ + + IntervalArray + IntervalDtype + +.. 
_api.arrays.integer_na: + +Nullable Integer +---------------- + +:class:`numpy.ndarray` cannot natively represent integer-data with missing values. +Pandas provides this through :class:`arrays.IntegerArray`. + +.. autosummary:: + :toctree: generated/ + + arrays.IntegerArray + Int8Dtype + Int16Dtype + Int32Dtype + Int64Dtype + UInt8Dtype + UInt16Dtype + UInt32Dtype + UInt64Dtype + +.. _api.arrays.categorical: + +Categorical Data +---------------- + +Pandas defines a custom data type for representing data that can take only a +limited, fixed set of values. The dtype of a ``Categorical`` can be described by +a :class:`pandas.api.types.CategoricalDtype`. + +.. autosummary:: + :toctree: generated/ + :template: autosummary/class_without_autosummary.rst + + api.types.CategoricalDtype + +.. autosummary:: + :toctree: generated/ + + api.types.CategoricalDtype.categories + api.types.CategoricalDtype.ordered + +Categorical data can be stored in a :class:`pandas.Categorical` + +.. autosummary:: + :toctree: generated/ + :template: autosummary/class_without_autosummary.rst + + Categorical + +The alternative :meth:`Categorical.from_codes` constructor can be used when you +have the categories and integer codes already: + +.. autosummary:: + :toctree: generated/ + + Categorical.from_codes + +The dtype information is available on the ``Categorical`` + +.. autosummary:: + :toctree: generated/ + + Categorical.dtype + Categorical.categories + Categorical.ordered + Categorical.codes + +``np.asarray(categorical)`` works by implementing the array interface. Be aware, that this converts +the Categorical back to a NumPy array, so categories and order information is not preserved! + +.. autosummary:: + :toctree: generated/ + + Categorical.__array__ + +A ``Categorical`` can be stored in a ``Series`` or ``DataFrame``. 
+To create a Series of dtype ``category``, use ``cat = s.astype(dtype)`` or +``Series(..., dtype=dtype)`` where ``dtype`` is either + +* the string ``'category'`` +* an instance of :class:`~pandas.api.types.CategoricalDtype`. + +If the Series is of dtype ``CategoricalDtype``, ``Series.cat`` can be used to change the categorical +data. See :ref:`api.series.cat` for more. + +.. _api.arrays.sparse: + +Sparse Data +----------- + +Data where a single value is repeated many times (e.g. ``0`` or ``NaN``) may +be stored efficiently as a :class:`SparseArray`. + +.. autosummary:: + :toctree: generated/ + + SparseArray + SparseDtype + +The ``Series.sparse`` accessor may be used to access sparse-specific attributes +and methods if the :class:`Series` contains sparse values. See +:ref:`api.series.sparse` for more. diff --git a/doc/source/api/index.rst b/doc/source/api/index.rst index 0bd89fc826a21..e4d118e278128 100644 --- a/doc/source/api/index.rst +++ b/doc/source/api/index.rst @@ -26,9 +26,9 @@ public functions related to data types in pandas. general_functions series frame + arrays panel indexing - scalars offset_frequency window groupby diff --git a/doc/source/api/scalars.rst b/doc/source/api/scalars.rst index 662a4d5a8fcfe..e69de29bb2d1d 100644 --- a/doc/source/api/scalars.rst +++ b/doc/source/api/scalars.rst @@ -1,204 +0,0 @@ -{{ header }} - -.. _api.scalars: - -======= -Scalars -======= -.. currentmodule:: pandas - -Period ------- -.. autosummary:: - :toctree: generated/ - - Period - -Properties -~~~~~~~~~~ -.. autosummary:: - :toctree: generated/ - - Period.day - Period.dayofweek - Period.dayofyear - Period.days_in_month - Period.daysinmonth - Period.end_time - Period.freq - Period.freqstr - Period.hour - Period.is_leap_year - Period.minute - Period.month - Period.ordinal - Period.quarter - Period.qyear - Period.second - Period.start_time - Period.week - Period.weekday - Period.weekofyear - Period.year - -Methods -~~~~~~~ -.. 
autosummary:: - :toctree: generated/ - - Period.asfreq - Period.now - Period.strftime - Period.to_timestamp - -Timestamp ---------- -.. autosummary:: - :toctree: generated/ - - Timestamp - -Properties -~~~~~~~~~~ -.. autosummary:: - :toctree: generated/ - - Timestamp.asm8 - Timestamp.day - Timestamp.dayofweek - Timestamp.dayofyear - Timestamp.days_in_month - Timestamp.daysinmonth - Timestamp.fold - Timestamp.hour - Timestamp.is_leap_year - Timestamp.is_month_end - Timestamp.is_month_start - Timestamp.is_quarter_end - Timestamp.is_quarter_start - Timestamp.is_year_end - Timestamp.is_year_start - Timestamp.max - Timestamp.microsecond - Timestamp.min - Timestamp.minute - Timestamp.month - Timestamp.nanosecond - Timestamp.quarter - Timestamp.resolution - Timestamp.second - Timestamp.tz - Timestamp.tzinfo - Timestamp.value - Timestamp.week - Timestamp.weekofyear - Timestamp.year - -Methods -~~~~~~~ -.. autosummary:: - :toctree: generated/ - - Timestamp.astimezone - Timestamp.ceil - Timestamp.combine - Timestamp.ctime - Timestamp.date - Timestamp.day_name - Timestamp.dst - Timestamp.floor - Timestamp.freq - Timestamp.freqstr - Timestamp.fromordinal - Timestamp.fromtimestamp - Timestamp.isocalendar - Timestamp.isoformat - Timestamp.isoweekday - Timestamp.month_name - Timestamp.normalize - Timestamp.now - Timestamp.replace - Timestamp.round - Timestamp.strftime - Timestamp.strptime - Timestamp.time - Timestamp.timestamp - Timestamp.timetuple - Timestamp.timetz - Timestamp.to_datetime64 - Timestamp.to_julian_date - Timestamp.to_period - Timestamp.to_pydatetime - Timestamp.today - Timestamp.toordinal - Timestamp.tz_convert - Timestamp.tz_localize - Timestamp.tzname - Timestamp.utcfromtimestamp - Timestamp.utcnow - Timestamp.utcoffset - Timestamp.utctimetuple - Timestamp.weekday - -Interval --------- -.. autosummary:: - :toctree: generated/ - - Interval - -Properties -~~~~~~~~~~ -.. 
autosummary:: - :toctree: generated/ - - Interval.closed - Interval.closed_left - Interval.closed_right - Interval.left - Interval.length - Interval.mid - Interval.open_left - Interval.open_right - Interval.overlaps - Interval.right - -Timedelta ---------- -.. autosummary:: - :toctree: generated/ - - Timedelta - -Properties -~~~~~~~~~~ -.. autosummary:: - :toctree: generated/ - - Timedelta.asm8 - Timedelta.components - Timedelta.days - Timedelta.delta - Timedelta.freq - Timedelta.is_populated - Timedelta.max - Timedelta.microseconds - Timedelta.min - Timedelta.nanoseconds - Timedelta.resolution - Timedelta.seconds - Timedelta.value - Timedelta.view - -Methods -~~~~~~~ -.. autosummary:: - :toctree: generated/ - - Timedelta.ceil - Timedelta.floor - Timedelta.isoformat - Timedelta.round - Timedelta.to_pytimedelta - Timedelta.to_timedelta64 - Timedelta.total_seconds diff --git a/doc/source/api/series.rst b/doc/source/api/series.rst index 7d5e6037b012a..f1238e9d3f2c3 100644 --- a/doc/source/api/series.rst +++ b/doc/source/api/series.rst @@ -278,14 +278,34 @@ Time series-related Series.tshift Series.slice_shift +Accessors +--------- + +Pandas provides dtype-specific methods under various accessors. +These are separate namespaces within :class:`Series` that only apply +to specific data types. + +=========================== ================================= +Data Type Accessor +=========================== ================================= +Datetime, Timedelta, Period :ref:`dt <api.series.dt>` +String :ref:`str <api.series.str>` +Categorical :ref:`cat <api.series.cat>` +Sparse :ref:`sparse <api.series.sparse>` +=========================== ================================= + +.. _api.series.dt: + Datetimelike Properties ------------------------ +~~~~~~~~~~~~~~~~~~~~~~~ + ``Series.dt`` can be used to access the values of the series as datetimelike and return several properties. These can be accessed like ``Series.dt.<property>``. 
Datetime Properties -~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^ + .. autosummary:: :toctree: generated/ :template: autosummary/accessor_attribute.rst @@ -320,7 +340,8 @@ Datetime Properties Series.dt.freq Datetime Methods -~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^ + .. autosummary:: :toctree: generated/ :template: autosummary/accessor_method.rst @@ -338,7 +359,8 @@ Datetime Methods Series.dt.day_name Period Properties -~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^ + .. autosummary:: :toctree: generated/ :template: autosummary/accessor_attribute.rst @@ -348,7 +370,8 @@ Period Properties Series.dt.end_time Timedelta Properties -~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^ + .. autosummary:: :toctree: generated/ :template: autosummary/accessor_attribute.rst @@ -360,7 +383,8 @@ Timedelta Properties Series.dt.components Timedelta Methods -~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^ + .. autosummary:: :toctree: generated/ :template: autosummary/accessor_method.rst @@ -368,8 +392,12 @@ Timedelta Methods Series.dt.to_pytimedelta Series.dt.total_seconds + +.. _api.series.str: + String handling ---------------- +~~~~~~~~~~~~~~~ + ``Series.str`` can be used to access the values of the series as strings and apply several methods to it. These can be accessed like ``Series.str.<function/property>``. @@ -445,82 +473,13 @@ strings and apply several methods to it. These can be accessed like Series.dt Index.str -.. _api.arrays: - -Arrays ------- -Pandas and third-party libraries can extend NumPy's type system (see :ref:`extending.extension-types`). - -.. autosummary:: - :toctree: generated/ - - array - -.. _api.categorical: - -Categorical -~~~~~~~~~~~ - -Pandas defines a custom data type for representing data that can take only a -limited, fixed set of values. The dtype of a ``Categorical`` can be described by -a :class:`pandas.api.types.CategoricalDtype`. - -.. autosummary:: - :toctree: generated/ - :template: autosummary/class_without_autosummary.rst - - api.types.CategoricalDtype - -.. 
autosummary:: - :toctree: generated/ - - api.types.CategoricalDtype.categories - api.types.CategoricalDtype.ordered - -Categorical data can be stored in a :class:`pandas.Categorical` - -.. autosummary:: - :toctree: generated/ - :template: autosummary/class_without_autosummary.rst - - Categorical - -The alternative :meth:`Categorical.from_codes` constructor can be used when you -have the categories and integer codes already: - -.. autosummary:: - :toctree: generated/ - - Categorical.from_codes - -The dtype information is available on the ``Categorical`` - -.. autosummary:: - :toctree: generated/ +.. _api.series.cat: - Categorical.dtype - Categorical.categories - Categorical.ordered - Categorical.codes - -``np.asarray(categorical)`` works by implementing the array interface. Be aware, that this converts -the Categorical back to a NumPy array, so categories and order information is not preserved! - -.. autosummary:: - :toctree: generated/ - - Categorical.__array__ - -A ``Categorical`` can be stored in a ``Series`` or ``DataFrame``. -To create a Series of dtype ``category``, use ``cat = s.astype(dtype)`` or -``Series(..., dtype=dtype)`` where ``dtype`` is either - -* the string ``'category'`` -* an instance of :class:`~pandas.api.types.CategoricalDtype`. +Categorical Accessor +~~~~~~~~~~~~~~~~~~~~ -If the Series is of dtype ``CategoricalDtype``, ``Series.cat`` can be used to change the categorical -data. This accessor is similar to the ``Series.dt`` or ``Series.str`` and has the -following usable methods and properties: +Categorical-dtype specific methods and attributes are available under +the ``Series.cat`` accessor. .. autosummary:: :toctree: generated/ @@ -543,6 +502,31 @@ following usable methods and properties: Series.cat.as_ordered Series.cat.as_unordered + +.. _api.series.sparse: + +Sparse Accessor +~~~~~~~~~~~~~~~ + +Sparse-dtype specific methods and attributes are provided under the +``Series.sparse`` accessor. + +.. 
autosummary:: + :toctree: generated/ + :template: autosummary/accessor_attribute.rst + + Series.sparse.npoints + Series.sparse.density + Series.sparse.fill_value + Series.sparse.sp_values + +.. autosummary:: + :toctree: generated/ + + Series.sparse.from_coo + Series.sparse.to_coo + + Plotting -------- ``Series.plot`` is both a callable method and a namespace attribute for @@ -594,25 +578,13 @@ Serialization / IO / Conversion Series.to_clipboard Series.to_latex + Sparse ------ + .. autosummary:: :toctree: generated/ SparseSeries.to_coo SparseSeries.from_coo -.. autosummary:: - :toctree: generated/ - :template: autosummary/accessor_attribute.rst - - Series.sparse.npoints - Series.sparse.density - Series.sparse.fill_value - Series.sparse.sp_values - -.. autosummary:: - :toctree: generated/ - - Series.sparse.from_coo - Series.sparse.to_coo diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 73ae26150b946..13681485d2f69 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1947,7 +1947,7 @@ documentation sections for more on each type. 
=================== ========================= ================== ============================= ============================= Kind of Data Data Type Scalar Array Documentation =================== ========================= ================== ============================= ============================= -tz-aware datetime :class:`DatetimeArray` :class:`Timestamp` :class:`arrays.DatetimeArray` :ref:`timeseries.timezone` +tz-aware datetime :class:`DatetimeTZDtype` :class:`Timestamp` :class:`arrays.DatetimeArray` :ref:`timeseries.timezone` Categorical :class:`CategoricalDtype` (none) :class:`Categorical` :ref:`categorical` period (time spans) :class:`PeriodDtype` :class:`Period` :class:`arrays.PeriodArray` :ref:`timeseries.periods` sparse :class:`SparseDtype` (none) :class:`arrays.SparseArray` :ref:`sparse` diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst index 68e39e68220a7..a6315c548b382 100644 --- a/doc/source/categorical.rst +++ b/doc/source/categorical.rst @@ -34,7 +34,7 @@ The categorical data type is useful in the following cases: * As a signal to other Python libraries that this column should be treated as a categorical variable (e.g. to use suitable statistical methods or plot types). -See also the :ref:`API docs on categoricals<api.categorical>`. +See also the :ref:`API docs on categoricals<api.arrays.categorical>`. .. _categorical.objectcreation: diff --git a/doc/source/comparison_with_r.rst b/doc/source/comparison_with_r.rst index a0143d717105c..dfd388125708e 100644 --- a/doc/source/comparison_with_r.rst +++ b/doc/source/comparison_with_r.rst @@ -512,7 +512,7 @@ In pandas this is accomplished with ``pd.cut`` and ``astype("category")``: pd.Series([1, 2, 3, 2, 2, 3]).astype("category") For more details and examples see :ref:`categorical introduction <categorical>` and the -:ref:`API documentation <api.categorical>`. There is also a documentation regarding the +:ref:`API documentation <api.arrays.categorical>`. 
There is also a documentation regarding the :ref:`differences to R's factor <categorical.rfactor>`. diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index a37aa2644a805..953f40d1afebe 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -1351,7 +1351,7 @@ important than their content, or as input to an algorithm which only accepts the integer encoding. (For more information about support in pandas for full categorical data, see the :ref:`Categorical introduction <categorical>` and the -:ref:`API documentation <api.categorical>`.) +:ref:`API documentation <api.arrays.categorical>`.) .. ipython:: python diff --git a/doc/source/whatsnew/v0.15.0.rst b/doc/source/whatsnew/v0.15.0.rst index 6f74f0393d123..420125afd29a4 100644 --- a/doc/source/whatsnew/v0.15.0.rst +++ b/doc/source/whatsnew/v0.15.0.rst @@ -72,7 +72,7 @@ methods to manipulate. Thanks to Jan Schulz for much of this API/implementation. :issue:`8075`, :issue:`8076`, :issue:`8143`, :issue:`8453`, :issue:`8518`). For full docs, see the :ref:`categorical introduction <categorical>` and the -:ref:`API documentation <api.categorical>`. +:ref:`API documentation <api.arrays.categorical>`. .. ipython:: python :okwarning: @@ -101,7 +101,7 @@ For full docs, see the :ref:`categorical introduction <categorical>` and the - The ``Categorical.labels`` attribute was renamed to ``Categorical.codes`` and is read only. If you want to manipulate codes, please use one of the - :ref:`API methods on Categoricals <api.categorical>`. + :ref:`API methods on Categoricals <api.arrays.categorical>`. - The ``Categorical.levels`` attribute is renamed to ``Categorical.categories``. 
diff --git a/pandas/core/arrays/array_.py b/pandas/core/arrays/array_.py index 9b2240eb62906..32c08e40b8033 100644 --- a/pandas/core/arrays/array_.py +++ b/pandas/core/arrays/array_.py @@ -47,13 +47,13 @@ def array(data, # type: Sequence[object] Currently, pandas will infer an extension dtype for sequences of ============================== ===================================== - scalar type Array Type - ============================= ===================================== - * :class:`pandas.Interval` :class:`pandas.IntervalArray` - * :class:`pandas.Period` :class:`pandas.arrays.PeriodArray` - * :class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray` - * :class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray` - ============================= ===================================== + Scalar Type Array Type + ============================== ===================================== + :class:`pandas.Interval` :class:`pandas.IntervalArray` + :class:`pandas.Period` :class:`pandas.arrays.PeriodArray` + :class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray` + :class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray` + ============================== ===================================== For all other cases, NumPy's usual inference rules will be used. diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py index 7c8f58c9a3203..6114e578dc90f 100644 --- a/pandas/core/arrays/sparse.py +++ b/pandas/core/arrays/sparse.py @@ -56,19 +56,19 @@ class SparseDtype(ExtensionDtype): ---------- dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64 The dtype of the underlying array storing the non-fill value values. - fill_value : scalar, optional. + fill_value : scalar, optional The scalar value not stored in the SparseArray. By default, this depends on `dtype`. 
- ========== ========== - dtype na_value - ========== ========== - float ``np.nan`` - int ``0`` - bool ``False`` - datetime64 ``pd.NaT`` + =========== ========== + dtype na_value + =========== ========== + float ``np.nan`` + int ``0`` + bool ``False`` + datetime64 ``pd.NaT`` timedelta64 ``pd.NaT`` - ========== ========== + =========== ========== The default value may be overridden by specifying a `fill_value`. """ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a50def7357826..7659f0696008b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6966,6 +6966,11 @@ def corr(self, method='pearson', min_periods=1): ------- y : DataFrame + See Also + -------- + DataFrame.corrwith + Series.corr + Examples -------- >>> histogram_intersection = lambda a, b: np.minimum(a, b @@ -6976,11 +6981,6 @@ def corr(self, method='pearson', min_periods=1): dogs cats dogs 1.0 0.3 cats 0.3 1.0 - - See Also - ------- - DataFrame.corrwith - Series.corr """ numeric_df = self._get_numeric_data() cols = numeric_df.columns diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d0555bd2e44b1..b3c14bac91f17 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -9223,7 +9223,10 @@ def _tz_convert(ax, tz): def tz_localize(self, tz, axis=0, level=None, copy=True, ambiguous='raise', nonexistent='raise'): """ - Localize tz-naive TimeSeries to target time zone. + Localize tz-naive index of a Series or DataFrame to target time zone. + + This operation localizes the Index. To localize the values in a + timezone-naive Series, use :meth:`Series.dt.tz_localize`. 
Parameters ---------- @@ -9250,10 +9253,9 @@ def tz_localize(self, tz, axis=0, level=None, copy=True, - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times - nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, - default 'raise' + nonexistent : str, default 'raise' A nonexistent time does not exist in a particular timezone - where clocks moved forward due to DST. + where clocks moved forward due to DST. Valid valuse are: - 'shift_forward' will shift the nonexistent time forward to the closest existing time @@ -9268,6 +9270,8 @@ def tz_localize(self, tz, axis=0, level=None, copy=True, Returns ------- + Series or DataFrame + Same type as the input. Raises ------
Closes https://github.com/pandas-dev/pandas/issues/24066 cc @datapythonista @jorisvandenbossche there are a few closely related changes here. * removed api.scalars. I think it's more useful to present "Here are all pandas extensions", and then sub-divide into array / scalar / dtype. I think arrays are the most prominent of these, so I I've named the file `api/arrays.rst` * Moved `pd.array` to api/arrays.rst * Moved Categorical from api/series.rst to api/arrays.rst * Re-written the accessor section and moved / left those in api/series.rst * Added DatetimeArray (the original point of this PR) --- I have quite a few warnings to cleanup, but wanted a +1 on the general reorganization before putting more time into it.
https://api.github.com/repos/pandas-dev/pandas/pulls/24626
2019-01-04T21:30:36Z
2019-01-05T14:52:43Z
2019-01-05T14:52:43Z
2019-01-05T14:52:47Z
remove eadata
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 082a314facdd6..5a8809f754385 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -73,34 +73,30 @@ class DatetimeIndexOpsMixin(ExtensionOpsMixin): DatetimeLikeArrayMixin._maybe_mask_results) __iter__ = ea_passthrough(DatetimeLikeArrayMixin.__iter__) - @property - def _eadata(self): - return self._data - @property def freq(self): """ Return the frequency object if it is set, otherwise None. """ - return self._eadata.freq + return self._data.freq @freq.setter def freq(self, value): - # validation is handled by _eadata setter - self._eadata.freq = value + # validation is handled by _data setter + self._data.freq = value @property def freqstr(self): """ Return the frequency object as a string if it is set, otherwise None. """ - return self._eadata.freqstr + return self._data.freqstr def unique(self, level=None): if level is not None: self._validate_index_level(level) - result = self._eadata.unique() + result = self._data.unique() # Note: if `self` is already unique, then self.unique() should share # a `freq` with self. If not already unique, then self.freq must be @@ -113,7 +109,7 @@ def _create_comparison_method(cls, op): Create a comparison method that dispatches to ``cls.values``. """ def wrapper(self, other): - result = op(self._eadata, maybe_unwrap_index(other)) + result = op(self._data, maybe_unwrap_index(other)) return result wrapper.__doc__ = op.__doc__ @@ -122,7 +118,7 @@ def wrapper(self, other): @property def _ndarray_values(self): - return self._eadata._ndarray_values + return self._data._ndarray_values # ------------------------------------------------------------------------ # Abstract data attributes @@ -131,12 +127,12 @@ def _ndarray_values(self): def values(self): # type: () -> np.ndarray # Note: PeriodArray overrides this to return an ndarray of objects. 
- return self._eadata._data + return self._data._data @property @Appender(DatetimeLikeArrayMixin.asi8.__doc__) def asi8(self): - return self._eadata.asi8 + return self._data.asi8 # ------------------------------------------------------------------------ @@ -485,7 +481,7 @@ def _add_datetimelike_methods(cls): def __add__(self, other): # dispatch to ExtensionArray implementation - result = self._eadata.__add__(maybe_unwrap_index(other)) + result = self._data.__add__(maybe_unwrap_index(other)) return wrap_arithmetic_op(self, other, result) cls.__add__ = __add__ @@ -497,13 +493,13 @@ def __radd__(self, other): def __sub__(self, other): # dispatch to ExtensionArray implementation - result = self._eadata.__sub__(maybe_unwrap_index(other)) + result = self._data.__sub__(maybe_unwrap_index(other)) return wrap_arithmetic_op(self, other, result) cls.__sub__ = __sub__ def __rsub__(self, other): - result = self._eadata.__rsub__(maybe_unwrap_index(other)) + result = self._data.__rsub__(maybe_unwrap_index(other)) return wrap_arithmetic_op(self, other, result) cls.__rsub__ = __rsub__ @@ -534,7 +530,6 @@ def repeat(self, repeats, axis=None): nv.validate_repeat(tuple(), dict(axis=axis)) freq = self.freq if is_period_dtype(self) else None return self._shallow_copy(self.asi8.repeat(repeats), freq=freq) - # TODO: dispatch to _eadata @Appender(_index_shared_docs['where'] % _index_doc_kwargs) def where(self, cond, other=None): @@ -599,10 +594,10 @@ def astype(self, dtype, copy=True): # Ensure that self.astype(self.dtype) is self return self - new_values = self._eadata.astype(dtype, copy=copy) + new_values = self._data.astype(dtype, copy=copy) # pass copy=False because any copying will be done in the - # _eadata.astype call above + # _data.astype call above return Index(new_values, dtype=new_values.dtype, name=self.name, copy=False) @@ -637,7 +632,7 @@ def shift(self, periods, freq=None): Index.shift : Shift values of Index. PeriodIndex.shift : Shift values of PeriodIndex. 
""" - result = self._eadata._time_shift(periods, freq=freq) + result = self._data._time_shift(periods, freq=freq) return type(self)(result, name=self.name) @@ -675,9 +670,6 @@ def maybe_unwrap_index(obj): unwrapped object """ if isinstance(obj, ABCIndexClass): - if isinstance(obj, DatetimeIndexOpsMixin): - # i.e. PeriodIndex/DatetimeIndex/TimedeltaIndex - return obj._eadata return obj._data return obj @@ -712,16 +704,16 @@ def _delegate_class(self): raise AbstractMethodError def _delegate_property_get(self, name, *args, **kwargs): - result = getattr(self._eadata, name) + result = getattr(self._data, name) if name not in self._raw_properties: result = Index(result, name=self.name) return result def _delegate_property_set(self, name, value, *args, **kwargs): - setattr(self._eadata, name, value) + setattr(self._data, name, value) def _delegate_method(self, name, *args, **kwargs): - result = operator.methodcaller(name, *args, **kwargs)(self._eadata) + result = operator.methodcaller(name, *args, **kwargs)(self._data) if name not in self._raw_methods: result = Index(result, name=self.name) return result diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index f396f081267b3..0201827d2f886 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -341,12 +341,12 @@ def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None): @property def dtype(self): - return self._eadata.dtype + return self._data.dtype @property def tz(self): # GH 18595 - return self._eadata.tz + return self._data.tz @tz.setter def tz(self, value): @@ -475,7 +475,7 @@ def union(self, other): if isinstance(result, DatetimeIndex): # TODO: we shouldn't be setting attributes like this; # in all the tests this equality already holds - result._eadata._dtype = this.dtype + result._data._dtype = this.dtype if (result.freq is None and (this.freq is not None or other.freq is not None)): result.freq = to_offset(result.inferred_freq) @@ -508,7 
+508,7 @@ def union_many(self, others): if isinstance(this, DatetimeIndex): # TODO: we shouldn't be setting attributes like this; # in all the tests this equality already holds - this._eadata._dtype = dtype + this._data._dtype = dtype return this def _can_fast_union(self, other): @@ -643,7 +643,7 @@ def intersection(self, other): def _get_time_micros(self): values = self.asi8 if self.tz is not None and not timezones.is_utc(self.tz): - values = self._eadata._local_timestamps() + values = self._data._local_timestamps() return fields.get_time_micros(values) def to_series(self, keep_tz=None, index=None, name=None): @@ -1139,7 +1139,7 @@ def offset(self, value): self.freq = value def __getitem__(self, key): - result = self._eadata.__getitem__(key) + result = self._data.__getitem__(key) if is_scalar(result): return result elif result.ndim > 1: diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 9301638d4f632..b9d6b8da2cada 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -36,7 +36,7 @@ def _make_wrapped_arith_op(opname): meth = getattr(TimedeltaArray, opname) def method(self, other): - result = meth(self._eadata, maybe_unwrap_index(other)) + result = meth(self._data, maybe_unwrap_index(other)) return wrap_arithmetic_op(self, other, result) method.__name__ = opname @@ -307,7 +307,7 @@ def _box_func(self): return lambda x: Timedelta(x, unit='ns') def __getitem__(self, key): - result = self._eadata.__getitem__(key) + result = self._data.__getitem__(key) if is_scalar(result): return result return type(self)(result, name=self.name) @@ -321,7 +321,7 @@ def astype(self, dtype, copy=True): # Have to repeat the check for 'timedelta64' (not ns) dtype # so that we can return a numeric index, since pandas will return # a TimedeltaIndex when dtype='timedelta' - result = self._eadata.astype(dtype, copy=copy) + result = self._data.astype(dtype, copy=copy) if self.hasnans: return Index(result, name=self.name) 
return Index(result.astype('i8'), name=self.name) diff --git a/pandas/core/series.py b/pandas/core/series.py index de34227cda28a..b94bd80a7aee3 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -477,10 +477,7 @@ def _values(self): """ Return the internal repr of this data. """ - result = self._data.internal_values() - if isinstance(result, DatetimeIndex): - result = result._eadata - return result + return self._data.internal_values() def _formatting_values(self): """ diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index 4474b06b19536..c31d7acad3111 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -1475,7 +1475,7 @@ def test_tdi_rmul_arraylike(self, other, box_with_array): tdi = TimedeltaIndex(['1 Day'] * 10) expected = timedelta_range('1 days', '10 days') - expected._eadata.freq = None + expected._data.freq = None tdi = tm.box_expected(tdi, box) expected = tm.box_expected(expected, xbox) diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index 8890593b1fa9d..6ec3b97bb1450 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -21,7 +21,7 @@ def test_from_pandas_array(self): result = DatetimeArray._from_sequence(arr, freq='infer') - expected = pd.date_range('1970-01-01', periods=5, freq='H')._eadata + expected = pd.date_range('1970-01-01', periods=5, freq='H')._data tm.assert_datetime_array_equal(result, expected) def test_mismatched_timezone_raises(self): diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py index 562be4cf85864..c03b8afbe79bf 100644 --- a/pandas/tests/indexes/datetimes/test_astype.py +++ b/pandas/tests/indexes/datetimes/test_astype.py @@ -318,8 +318,7 @@ def test_astype_category(self, tz): pd.Timestamp('2000-01-02', tz=tz)]) tm.assert_index_equal(result, expected) - # TODO: use \._data 
following composition changeover - result = obj._eadata.astype('category') + result = obj._data.astype('category') expected = expected.values tm.assert_categorical_equal(result, expected) diff --git a/pandas/tests/indexes/timedeltas/test_astype.py b/pandas/tests/indexes/timedeltas/test_astype.py index 3f5507612c8e6..23e96dbc3d6ce 100644 --- a/pandas/tests/indexes/timedeltas/test_astype.py +++ b/pandas/tests/indexes/timedeltas/test_astype.py @@ -95,8 +95,7 @@ def test_astype_category(self): pd.Timedelta('2H')]) tm.assert_index_equal(result, expected) - # TODO: Use \._data following composition changeover - result = obj._eadata.astype('category') + result = obj._data.astype('category') expected = expected.values tm.assert_categorical_equal(result, expected)
Closes https://github.com/pandas-dev/pandas/issues/24565
https://api.github.com/repos/pandas-dev/pandas/pulls/24625
2019-01-04T20:52:15Z
2019-01-05T12:35:11Z
2019-01-05T12:35:10Z
2019-01-05T14:20:28Z
REF/TST: use pytest builtin monkeypatch fixture and remove mock fixture
diff --git a/pandas/conftest.py b/pandas/conftest.py index 30b24e00779a9..35a6b5df35ddc 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -1,6 +1,5 @@ from datetime import date, time, timedelta from decimal import Decimal -import importlib import os from dateutil.tz import tzlocal, tzutc @@ -637,20 +636,6 @@ def any_skipna_inferred_dtype(request): return inferred_dtype, values -@pytest.fixture -def mock(): - """ - Fixture providing the 'mock' module. - - Uses 'unittest.mock' for Python 3. Attempts to import the 3rd party 'mock' - package for Python 2, skipping if not present. - """ - if PY3: - return importlib.import_module("unittest.mock") - else: - return pytest.importorskip("mock") - - @pytest.fixture(params=[getattr(pd.offsets, o) for o in pd.offsets.__all__ if issubclass(getattr(pd.offsets, o), pd.offsets.Tick)]) def tick_classes(request): diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index f58cb362cd6d2..89662b70a39ad 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -197,7 +197,7 @@ def __contains__(self, key): assert result is expected -def test_is_file_like(mock): +def test_is_file_like(): class MockFile(object): pass @@ -235,7 +235,6 @@ class MockFile(object): # Iterator but no read / write attributes data = [1, 2, 3] assert not is_file(data) - assert not is_file(mock.Mock()) @pytest.mark.parametrize( diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index c979894048127..d175f669703c7 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -305,16 +305,14 @@ def test_repr_non_interactive(self): assert not has_truncated_repr(df) assert not has_expanded_repr(df) - def test_repr_truncates_terminal_size(self, mock): - # https://github.com/pandas-dev/pandas/issues/21180 - # TODO: use mock fixutre. - # This is being backported, so doing it directly here. 
+ def test_repr_truncates_terminal_size(self, monkeypatch): + # see gh-21180 terminal_size = (118, 96) - p1 = mock.patch('pandas.io.formats.console.get_terminal_size', - return_value=terminal_size) - p2 = mock.patch('pandas.io.formats.format.get_terminal_size', - return_value=terminal_size) + monkeypatch.setattr('pandas.io.formats.console.get_terminal_size', + lambda: terminal_size) + monkeypatch.setattr('pandas.io.formats.format.get_terminal_size', + lambda: terminal_size) index = range(5) columns = pd.MultiIndex.from_tuples([ @@ -323,8 +321,7 @@ def test_repr_truncates_terminal_size(self, mock): ]) df = pd.DataFrame(1, index=index, columns=columns) - with p1, p2: - result = repr(df) + result = repr(df) h1, h2 = result.split('\n')[:2] assert 'long' in h1 @@ -334,21 +331,19 @@ def test_repr_truncates_terminal_size(self, mock): # regular columns df2 = pd.DataFrame({"A" * 41: [1, 2], 'B' * 41: [1, 2]}) - with p1, p2: - result = repr(df2) + result = repr(df2) assert df2.columns[0] in result.split('\n')[0] - def test_repr_truncates_terminal_size_full(self, mock): + def test_repr_truncates_terminal_size_full(self, monkeypatch): # GH 22984 ensure entire window is filled terminal_size = (80, 24) df = pd.DataFrame(np.random.rand(1, 7)) - p1 = mock.patch('pandas.io.formats.console.get_terminal_size', - return_value=terminal_size) - p2 = mock.patch('pandas.io.formats.format.get_terminal_size', - return_value=terminal_size) - with p1, p2: - assert "..." not in str(df) + monkeypatch.setattr('pandas.io.formats.console.get_terminal_size', + lambda: terminal_size) + monkeypatch.setattr('pandas.io.formats.format.get_terminal_size', + lambda: terminal_size) + assert "..." 
not in str(df) def test_repr_max_columns_max_rows(self): term_width, term_height = get_terminal_size() diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index 2dc4c578102bb..b1547181350bc 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -1814,13 +1814,16 @@ class InvalidBuffer(object): parser.read_csv(InvalidBuffer()) -def test_invalid_file_buffer_mock(all_parsers, mock): +def test_invalid_file_buffer_mock(all_parsers): # see gh-15337 parser = all_parsers msg = "Invalid file path or buffer object type" + class Foo(): + pass + with pytest.raises(ValueError, match=msg): - parser.read_csv(mock.Mock()) + parser.read_csv(Foo()) def test_valid_file_buffer_seems_invalid(all_parsers): diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py index 12b082c3d4099..ec0631e748dfc 100644 --- a/pandas/tests/io/test_gcs.py +++ b/pandas/tests/io/test_gcs.py @@ -17,43 +17,51 @@ def test_is_gcs_url(): @td.skip_if_no('gcsfs') -def test_read_csv_gcs(mock): +def test_read_csv_gcs(monkeypatch): df1 = DataFrame({'int': [1, 3], 'float': [2.0, np.nan], 'str': ['t', 's'], 'dt': date_range('2018-06-18', periods=2)}) - with mock.patch('gcsfs.GCSFileSystem') as MockFileSystem: - instance = MockFileSystem.return_value - instance.open.return_value = StringIO(df1.to_csv(index=False)) - df2 = read_csv('gs://test/test.csv', parse_dates=['dt']) + + class MockGCSFileSystem(): + def open(*args): + return StringIO(df1.to_csv(index=False)) + + monkeypatch.setattr('gcsfs.GCSFileSystem', MockGCSFileSystem) + df2 = read_csv('gs://test/test.csv', parse_dates=['dt']) assert_frame_equal(df1, df2) @td.skip_if_no('gcsfs') -def test_to_csv_gcs(mock): +def test_to_csv_gcs(monkeypatch): df1 = DataFrame({'int': [1, 3], 'float': [2.0, np.nan], 'str': ['t', 's'], 'dt': date_range('2018-06-18', periods=2)}) - with mock.patch('gcsfs.GCSFileSystem') as MockFileSystem: - s = StringIO() - instance = 
MockFileSystem.return_value - instance.open.return_value = s + s = StringIO() + + class MockGCSFileSystem(): + def open(*args): + return s - df1.to_csv('gs://test/test.csv', index=True) - df2 = read_csv(StringIO(s.getvalue()), parse_dates=['dt'], index_col=0) + monkeypatch.setattr('gcsfs.GCSFileSystem', MockGCSFileSystem) + df1.to_csv('gs://test/test.csv', index=True) + df2 = read_csv(StringIO(s.getvalue()), parse_dates=['dt'], index_col=0) assert_frame_equal(df1, df2) @td.skip_if_no('gcsfs') -def test_gcs_get_filepath_or_buffer(mock): +def test_gcs_get_filepath_or_buffer(monkeypatch): df1 = DataFrame({'int': [1, 3], 'float': [2.0, np.nan], 'str': ['t', 's'], 'dt': date_range('2018-06-18', periods=2)}) - with mock.patch('pandas.io.gcs.get_filepath_or_buffer') as MockGetFilepath: - MockGetFilepath.return_value = (StringIO(df1.to_csv(index=False)), - None, None, False) - df2 = read_csv('gs://test/test.csv', parse_dates=['dt']) + + def mock_get_filepath_or_buffer(*args, **kwargs): + return (StringIO(df1.to_csv(index=False)), + None, None, False) + + monkeypatch.setattr('pandas.io.gcs.get_filepath_or_buffer', + mock_get_filepath_or_buffer) + df2 = read_csv('gs://test/test.csv', parse_dates=['dt']) assert_frame_equal(df1, df2) - assert MockGetFilepath.called @pytest.mark.skipif(td.safe_import('gcsfs'), diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 436ccef48ae12..0e7672f4e2f9d 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -2988,21 +2988,21 @@ def test_secondary_axis_font_size(self, method): self._check_ticks_props(axes=ax.right_ax, ylabelsize=fontsize) - def test_misc_bindings(self, mock): + def test_misc_bindings(self, monkeypatch): df = pd.DataFrame(randn(10, 10), columns=list('abcdefghij')) - p1 = mock.patch('pandas.plotting._misc.scatter_matrix', - return_value=2) - p2 = mock.patch('pandas.plotting._misc.andrews_curves', - return_value=2) - p3 = 
mock.patch('pandas.plotting._misc.parallel_coordinates', - return_value=2) - p4 = mock.patch('pandas.plotting._misc.radviz', - return_value=2) - with p1, p2, p3, p4: - assert df.plot.scatter_matrix() == 2 - assert df.plot.andrews_curves('a') == 2 - assert df.plot.parallel_coordinates('a') == 2 - assert df.plot.radviz('a') == 2 + monkeypatch.setattr('pandas.plotting._misc.scatter_matrix', + lambda x: 2) + monkeypatch.setattr('pandas.plotting._misc.andrews_curves', + lambda x, y: 2) + monkeypatch.setattr('pandas.plotting._misc.parallel_coordinates', + lambda x, y: 2) + monkeypatch.setattr('pandas.plotting._misc.radviz', + lambda x, y: 2) + + assert df.plot.scatter_matrix() == 2 + assert df.plot.andrews_curves('a') == 2 + assert df.plot.parallel_coordinates('a') == 2 + assert df.plot.radviz('a') == 2 def _generate_4_axes_via_gridspec(): diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 39f8f2f44fda0..1e223c20f55b7 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -878,18 +878,18 @@ def test_custom_business_day_freq(self): _check_plot_works(s.plot) - def test_misc_bindings(self, mock): + def test_misc_bindings(self, monkeypatch): s = Series(randn(10)) - p1 = mock.patch('pandas.plotting._misc.lag_plot', - return_value=2) - p2 = mock.patch('pandas.plotting._misc.autocorrelation_plot', - return_value=2) - p3 = mock.patch('pandas.plotting._misc.bootstrap_plot', - return_value=2) - with p1, p2, p3: - assert s.plot.lag() == 2 - assert s.plot.autocorrelation() == 2 - assert s.plot.bootstrap() == 2 + monkeypatch.setattr('pandas.plotting._misc.lag_plot', + lambda x: 2) + monkeypatch.setattr('pandas.plotting._misc.autocorrelation_plot', + lambda x: 2) + monkeypatch.setattr('pandas.plotting._misc.bootstrap_plot', + lambda x: 2) + + assert s.plot.lag() == 2 + assert s.plot.autocorrelation() == 2 + assert s.plot.bootstrap() == 2 @pytest.mark.xfail def 
test_plot_accessor_updates_on_inplace(self):
- [n/a ] xref https://github.com/pandas-dev/pandas/pull/24557#issuecomment-451174496 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [n/a ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24624
2019-01-04T19:39:01Z
2019-01-05T14:52:20Z
2019-01-05T14:52:20Z
2019-01-05T21:04:24Z
EA: revert treatment of i8values
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index cfb697b3c357a..73e799f9e0a36 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -606,7 +606,7 @@ def _concat_same_type(cls, to_concat): def copy(self, deep=False): values = self.asi8.copy() - return type(self)(values, dtype=self.dtype, freq=self.freq) + return type(self)._simple_new(values, dtype=self.dtype, freq=self.freq) def _values_for_factorize(self): return self.asi8, iNaT diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index efa1757a989fc..2f7cd3768b6ab 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -50,6 +50,7 @@ def tz_to_dtype(tz): if tz is None: return _NS_DTYPE else: + tz = timezones.tz_standardize(tz) return DatetimeTZDtype(tz=tz) @@ -254,77 +255,53 @@ class DatetimeArray(dtl.DatetimeLikeArrayMixin, _dtype = None # type: Union[np.dtype, DatetimeTZDtype] _freq = None - def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False): - if isinstance(values, (ABCSeries, ABCIndexClass)): - values = values._values - - if isinstance(values, type(self)): - # validation - dtz = getattr(dtype, 'tz', None) - if dtz and values.tz is None: - dtype = DatetimeTZDtype(tz=dtype.tz) - elif dtz and values.tz: - if not timezones.tz_compare(dtz, values.tz): - msg = ( - "Timezone of the array and 'dtype' do not match. " - "'{}' != '{}'" - ) - raise TypeError(msg.format(dtz, values.tz)) - elif values.tz: - dtype = values.dtype - # freq = validate_values_freq(values, freq) - if freq is None: - freq = values.freq - values = values._data + def __init__(self, values, dtype=None, freq=None, copy=False): + if freq == "infer": + raise ValueError( + "Frequency inference not allowed in DatetimeArray.__init__. " + "Use 'pd.array()' instead.") - if not isinstance(values, np.ndarray): - msg = ( - "Unexpected type '{}'. 
'values' must be a DatetimeArray " + if not hasattr(values, "dtype"): + # e.g. list + raise ValueError( + "Unexpected type '{vals}'. 'values' must be a DatetimeArray " "ndarray, or Series or Index containing one of those." - ) - raise ValueError(msg.format(type(values).__name__)) - - if values.dtype == 'i8': - # for compat with datetime/timedelta/period shared methods, - # we can sometimes get here with int64 values. These represent - # nanosecond UTC (or tz-naive) unix timestamps - values = values.view(_NS_DTYPE) - - if values.dtype != _NS_DTYPE: - msg = ( + .format(vals=type(values).__name__)) + + if is_datetime64_dtype(values.dtype) and hasattr(dtype, "tz"): + # cast to make _from_sequence treat as unix instead of wall-times; + # see GH#24559 + values = type(self)._simple_new( + np.asarray(values), + freq=getattr(values, "freq", None), + dtype=tz_to_dtype(utc)).tz_convert(dtype.tz) + + elif not (is_datetime64tz_dtype(values.dtype) or + is_datetime64_dtype(values.dtype) or + values.dtype == 'i8'): + raise ValueError( "The dtype of 'values' is incorrect. Must be 'datetime64[ns]'." - " Got {} instead." - ) - raise ValueError(msg.format(values.dtype)) + " Got {dtype} instead." .format(dtype=values.dtype)) - dtype = _validate_dt64_dtype(dtype) - - if freq == "infer": - msg = ( - "Frequency inference not allowed in DatetimeArray.__init__. " - "Use 'pd.array()' instead." - ) - raise ValueError(msg) - - if copy: - values = values.copy() - if freq: - freq = to_offset(freq) - if getattr(dtype, 'tz', None): - # https://github.com/pandas-dev/pandas/issues/18595 - # Ensure that we have a standard timezone for pytz objects. - # Without this, things like adding an array of timedeltas and - # a tz-aware Timestamp (with a tz specific to its datetime) will - # be incorrect(ish?) 
for the array as a whole - dtype = DatetimeTZDtype(tz=timezones.tz_standardize(dtype.tz)) - - self._data = values - self._dtype = dtype - self._freq = freq + arr = type(self)._from_sequence(values, dtype=dtype, + freq=freq, copy=copy) + self._data = arr._data + self._freq = arr._freq + self._dtype = arr._dtype @classmethod - def _simple_new(cls, values, freq=None, dtype=None): - return cls(values, freq=freq, dtype=dtype) + def _simple_new(cls, values, freq=None, dtype=_NS_DTYPE): + """ + we require the we have a dtype compat for the values + if we are passed a non-dtype compat, then coerce using the constructor + """ + assert isinstance(values, np.ndarray), type(values) + + result = object.__new__(cls) + result._data = values.view('datetime64[ns]') + result._freq = freq + result._dtype = dtype + return result @classmethod def _from_sequence(cls, data, dtype=None, copy=False, diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7bbbdd70e062e..6302e31510d2f 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4377,7 +4377,8 @@ def _maybe_casted_values(index, labels=None): values, mask, np.nan) if issubclass(values_type, DatetimeLikeArray): - values = values_type(values, dtype=values_dtype) + values = values_type._simple_new(values, + dtype=values_dtype) return values diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 664ca9c5d2f05..aa61632441906 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -314,18 +314,20 @@ def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None): if isinstance(values, DatetimeArray): if tz: tz = validate_tz_from_dtype(dtype, tz) + tz = timezones.tz_standardize(tz) dtype = DatetimeTZDtype(tz=tz) elif dtype is None: - dtype = _NS_DTYPE + dtype = values.dtype values = DatetimeArray(values, freq=freq, dtype=dtype) tz = values.tz freq = values.freq values = values._data + else: + tz = tz or getattr(dtype, 'tz', None) # 
DatetimeArray._simple_new will accept either i8 or M8[ns] dtypes - if isinstance(values, DatetimeIndex): - values = values._data + assert isinstance(values, np.ndarray) dtype = tz_to_dtype(tz) dtarr = DatetimeArray._simple_new(values, freq=freq, dtype=dtype) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 70e4f44cb5de8..dcc28ef9dec1d 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -3078,7 +3078,7 @@ def make_block(values, placement, klass=None, ndim=None, dtype=None, elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values): # TODO: This is no longer hit internally; does it need to be retained # for e.g. pyarrow? - values = DatetimeArray(values, dtype) + values = DatetimeArray(values.view('i8'), dtype) return klass(values, ndim=ndim, placement=placement) diff --git a/pandas/io/packers.py b/pandas/io/packers.py index b83eab7d0eba0..c2a495cf6eaf2 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -656,7 +656,7 @@ def create_block(b): if is_datetime64tz_dtype(b[u'dtype']): assert isinstance(values, np.ndarray), type(values) assert values.dtype == 'M8[ns]', values.dtype - values = DatetimeArray(values, dtype=b[u'dtype']) + values = DatetimeArray(values.view('i8'), dtype=b[u'dtype']) return make_block(values=values, klass=getattr(internals, b[u'klass']), diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index f234e4fadec61..997e664d484d0 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -340,7 +340,7 @@ def test_from_array_keeps_base(self): arr = np.array(['2000-01-01', '2000-01-02'], dtype='M8[ns]') dta = DatetimeArray(arr) - assert dta._data is arr + assert dta._data.base is arr dta = DatetimeArray(arr[:0]) assert dta._data.base is arr diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index 8228ed7652fea..32cafad584d01 100644 --- 
a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -16,6 +16,69 @@ class TestDatetimeArrayConstructor(object): + + @pytest.mark.parametrize('tz', [None, 'Asia/Singapore']) + def test_constructor_equivalence(self, tz): + # GH#24623 check that DatetimeArray.__init__ behavior matches: + # Timestamp.__new__ for int64 + # DatetimeArray._from_sequence for int64, datetime64[ns] + # DatetimeArray._simple_new for int64 + # DatetimeIndex.__new__ for int64, datetime64[ns] + # DatetimeIndex._simple_new for int64, datetime64[ns] + # + # and that DatetimeArray._simple_new behaves like + # DatetimeIndex._simple_new for both int64 and datetime64[ns] inputs + arr = np.random.randint(-10**9, 10**9, size=5, dtype=np.int64) + dt64arr = arr.view('datetime64[ns]') + dti = pd.date_range('1960-01-01', periods=1, tz=tz) + + v1 = DatetimeArray._simple_new(arr.view('i8'), dtype=dti.dtype) + v2 = DatetimeArray(arr.view('i8'), dtype=dti.dtype) + v3 = DatetimeArray._from_sequence(arr.view('i8'), dtype=dti.dtype) + v4 = pd.DatetimeIndex._simple_new(arr.view('i8'), tz=dti.tz) + v5 = pd.DatetimeIndex(arr.view('i8'), tz=dti.tz) + v6 = pd.to_datetime(arr, utc=True).tz_convert(dti.tz) + + # when dealing with _simple_new, i8 and M8[ns] are interchangeable + v7 = DatetimeArray._simple_new(arr.view('M8[ns]'), dtype=dti.dtype) + v8 = pd.DatetimeIndex._simple_new(arr.view('M8[ns]'), dtype=dti.dtype) + + # GH#24623 DatetimeArray.__init__ treats M8[ns] as unix timestamps, + # unlike DatetimeIndex.__new__. 
+ v9 = DatetimeArray(dt64arr, dtype=dti.dtype) + + tm.assert_datetime_array_equal(v1, v2) + tm.assert_datetime_array_equal(v1, v3) + tm.assert_datetime_array_equal(v1, v4._data) + tm.assert_datetime_array_equal(v1, v5._data) + tm.assert_datetime_array_equal(v1, v6._data) + tm.assert_datetime_array_equal(v1, v7) + tm.assert_datetime_array_equal(v1, v8._data) + tm.assert_datetime_array_equal(v1, v9) + + expected = [pd.Timestamp(i8, tz=dti.tz) for i8 in arr] + assert list(v1) == expected + + # The guarantees for datetime64 data are fewer + v1 = DatetimeArray._from_sequence(dt64arr, dtype=dti.dtype) + v2 = DatetimeArray._from_sequence(dt64arr, tz=dti.tz) + v3 = pd.DatetimeIndex(dt64arr, dtype=dti.dtype) + v4 = pd.DatetimeIndex(dt64arr, tz=dti.tz) + + tm.assert_datetime_array_equal(v1, v2) + tm.assert_datetime_array_equal(v1, v3._data) + tm.assert_datetime_array_equal(v1, v4._data) + + def test_freq_validation(self): + # GH#24623 check that invalid instances cannot be created with the + # public constructor + arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9 + + msg = ("Inferred frequency H from passed values does not " + "conform to passed frequency W-SUN") + with pytest.raises(ValueError, match=msg): + DatetimeArray(arr, freq="W") + @pytest.mark.parametrize('meth', [DatetimeArray._from_sequence, sequence_to_dt64ns, pd.to_datetime, @@ -35,6 +98,7 @@ def test_mixing_naive_tzaware_raises(self, meth): meth(obj) def test_from_pandas_array(self): + # GH#24623, GH#24615 arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9 result = DatetimeArray._from_sequence(arr, freq='infer') @@ -46,7 +110,8 @@ def test_mismatched_timezone_raises(self): arr = DatetimeArray(np.array(['2000-01-01T06:00:00'], dtype='M8[ns]'), dtype=DatetimeTZDtype(tz='US/Central')) dtype = DatetimeTZDtype(tz='US/Eastern') - with pytest.raises(TypeError, match='Timezone of the array'): + with pytest.raises(TypeError, + match='data is already tz-aware US/Central'): DatetimeArray(arr, 
dtype=dtype) def test_non_array_raises(self): @@ -69,10 +134,11 @@ def test_freq_infer_raises(self): def test_copy(self): data = np.array([1, 2, 3], dtype='M8[ns]') arr = DatetimeArray(data, copy=False) - assert arr._data is data + assert arr._data.base is data arr = DatetimeArray(data, copy=True) assert arr._data is not data + assert arr._data.base is not data class TestDatetimeArrayComparisons(object): diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 657f5f193c85e..f3e78a3157399 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -1255,7 +1255,7 @@ def test_array(array, attr, box): array = getattr(array, attr) result = getattr(result, attr) - assert result is array + assert result is array or result.base is array.base def test_array_multiindex_raises():
One option for #24559, with some spillover into #24567. Restores freq validation (needs test) Retains a _private_ fastpath - [x] Closes #24615
https://api.github.com/repos/pandas-dev/pandas/pulls/24623
2019-01-04T19:36:43Z
2019-01-10T16:30:09Z
null
2019-01-10T16:30:14Z
ensure DatetimeTZBlock always gets a DatetimeArray
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index f88114e1c9e20..5f2fc17b08ac6 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2240,24 +2240,11 @@ class DatetimeTZBlock(ExtensionBlock, DatetimeBlock): is_datetimetz = True is_extension = True - def __init__(self, values, placement, ndim=2, dtype=None): - # XXX: This will end up calling _maybe_coerce_values twice - # when dtype is not None. It's relatively cheap (just an isinstance) - # but it'd nice to avoid. - # - # If we can remove dtype from __init__, and push that conversion - # push onto the callers, then we can remove this entire __init__ - # and just use DatetimeBlock's. - if dtype is not None: - values = self._maybe_coerce_values(values, dtype=dtype) - super(DatetimeTZBlock, self).__init__(values, placement=placement, - ndim=ndim) - @property def _holder(self): return DatetimeArray - def _maybe_coerce_values(self, values, dtype=None): + def _maybe_coerce_values(self, values): """Input validation for values passed to __init__. Ensure that we have datetime64TZ, coercing if necessary. 
@@ -2265,19 +2252,14 @@ def _maybe_coerce_values(self, values, dtype=None): ----------- values : array-like Must be convertible to datetime64 - dtype : string or DatetimeTZDtype, optional - Does a shallow copy to this tz Returns ------- - values : ndarray[datetime64ns] + values : DatetimeArray """ if not isinstance(values, self._holder): values = self._holder(values) - if dtype is not None: - values = type(values)(values, dtype=dtype) - if values.tz is None: raise ValueError("cannot create a DatetimeTZBlock without a tz") @@ -3087,8 +3069,9 @@ def make_block(values, placement, klass=None, ndim=None, dtype=None, klass = get_block_type(values, dtype) elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values): - return klass(values, ndim=ndim, - placement=placement, dtype=dtype) + # TODO: This is no longer hit internally; does it need to be retained + # for e.g. pyarrow? + values = DatetimeArray(values, dtype) return klass(values, ndim=ndim, placement=placement) diff --git a/pandas/io/packers.py b/pandas/io/packers.py index e6d18d5d4193a..b83eab7d0eba0 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -53,14 +53,15 @@ BadMove as _BadMove, move_into_mutable_buffer as _move_into_mutable_buffer) from pandas.core.dtypes.common import ( - is_categorical_dtype, is_object_dtype, needs_i8_conversion, pandas_dtype) + is_categorical_dtype, is_datetime64tz_dtype, is_object_dtype, + needs_i8_conversion, pandas_dtype) from pandas import ( # noqa:F401 Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Float64Index, Index, Int64Index, Interval, IntervalIndex, MultiIndex, NaT, Panel, Period, PeriodIndex, RangeIndex, Series, TimedeltaIndex, Timestamp) from pandas.core import internals -from pandas.core.arrays import IntervalArray, PeriodArray +from pandas.core.arrays import DatetimeArray, IntervalArray, PeriodArray from pandas.core.arrays.sparse import BlockIndex, IntIndex from pandas.core.generic import NDFrame from pandas.core.internals import 
BlockManager, _safe_reshape, make_block @@ -651,6 +652,12 @@ def create_block(b): placement = b[u'locs'] else: placement = axes[0].get_indexer(b[u'items']) + + if is_datetime64tz_dtype(b[u'dtype']): + assert isinstance(values, np.ndarray), type(values) + assert values.dtype == 'M8[ns]', values.dtype + values = DatetimeArray(values, dtype=b[u'dtype']) + return make_block(values=values, klass=getattr(internals, b[u'klass']), placement=placement,
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24622
2019-01-04T18:39:56Z
2019-01-05T14:51:59Z
2019-01-05T14:51:59Z
2019-01-05T15:39:37Z
Fix 32-bit builds by correctly using intp_t instead of int64_t for numpy.searchsorted result
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 7f06784062d1a..7c9c2cafd1afb 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -3,7 +3,7 @@ import cython import numpy as np cimport numpy as cnp -from numpy cimport uint8_t, int64_t, int32_t, ndarray +from numpy cimport uint8_t, int64_t, int32_t, intp_t, ndarray cnp.import_array() import pytz @@ -639,7 +639,7 @@ cdef inline int64_t[:] _tz_convert_dst(int64_t[:] values, tzinfo tz, cdef: Py_ssize_t n = len(values) Py_ssize_t i - int64_t[:] pos + intp_t[:] pos int64_t[:] result = np.empty(n, dtype=np.int64) ndarray[int64_t] trans int64_t[:] deltas
See discussion on #24613 - [x] closes #24613 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24621
2019-01-04T17:39:42Z
2019-01-04T18:37:59Z
2019-01-04T18:37:59Z
2019-01-04T18:41:45Z
CLN: _try_coerce_result, redundant dtypes.missing method
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx index c7f06bc5d7d4f..e922a5d1c3b27 100644 --- a/pandas/_libs/missing.pyx +++ b/pandas/_libs/missing.pyx @@ -12,8 +12,7 @@ cimport pandas._libs.util as util from pandas._libs.tslibs.np_datetime cimport ( get_timedelta64_value, get_datetime64_value) -from pandas._libs.tslibs.nattype cimport checknull_with_nat -from pandas._libs.tslibs.nattype import NaT +from pandas._libs.tslibs.nattype cimport checknull_with_nat, c_NaT cdef float64_t INF = <float64_t>np.inf cdef float64_t NEGINF = -INF @@ -27,7 +26,7 @@ cdef inline bint _check_all_nulls(object val): if isinstance(val, (float, complex)): res = val != val - elif val is NaT: + elif val is c_NaT: res = 1 elif val is None: res = 1 @@ -67,7 +66,7 @@ cpdef bint checknull(object val): return val != val # and val != INF and val != NEGINF elif util.is_datetime64_object(val): return get_datetime64_value(val) == NPY_NAT - elif val is NaT: + elif val is c_NaT: return True elif util.is_timedelta64_object(val): return get_timedelta64_value(val) == NPY_NAT @@ -106,7 +105,7 @@ cpdef bint checknull_old(object val): return val != val or val == INF or val == NEGINF elif util.is_datetime64_object(val): return get_datetime64_value(val) == NPY_NAT - elif val is NaT: + elif val is c_NaT: return True elif util.is_timedelta64_object(val): return get_timedelta64_value(val) == NPY_NAT @@ -190,7 +189,7 @@ def isnaobj_old(ndarray arr): result = np.zeros(n, dtype=np.uint8) for i in range(n): val = arr[i] - result[i] = val is NaT or _check_none_nan_inf_neginf(val) + result[i] = val is c_NaT or _check_none_nan_inf_neginf(val) return result.view(np.bool_) diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py index c7765a2c2b89c..38401cab57f5d 100644 --- a/pandas/_libs/tslibs/__init__.py +++ b/pandas/_libs/tslibs/__init__.py @@ -2,7 +2,7 @@ # flake8: noqa from .conversion import normalize_date, localize_pydatetime, tz_convert_single -from .nattype import NaT, 
iNaT +from .nattype import NaT, iNaT, is_null_datetimelike from .np_datetime import OutOfBoundsDatetime from .period import Period, IncompatibleFrequency from .timestamps import Timestamp diff --git a/pandas/_libs/tslibs/nattype.pxd b/pandas/_libs/tslibs/nattype.pxd index f649518e969be..ee8d5ca3d861c 100644 --- a/pandas/_libs/tslibs/nattype.pxd +++ b/pandas/_libs/tslibs/nattype.pxd @@ -17,4 +17,4 @@ cdef _NaT c_NaT cdef bint checknull_with_nat(object val) -cdef bint is_null_datetimelike(object val) +cpdef bint is_null_datetimelike(object val) diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 604599f895476..df083f27ad653 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -686,7 +686,7 @@ cdef inline bint checknull_with_nat(object val): return val is None or util.is_nan(val) or val is c_NaT -cdef inline bint is_null_datetimelike(object val): +cpdef bint is_null_datetimelike(object val): """ Determine if we have a null for a timedelta/datetime (or integer versions) diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index b22cb1050f140..3c6d3f212342b 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -10,9 +10,9 @@ _NS_DTYPE, _TD_DTYPE, ensure_object, is_bool_dtype, is_complex_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_datetimelike, is_datetimelike_v_numeric, is_dtype_equal, is_extension_array_dtype, - is_float_dtype, is_integer, is_integer_dtype, is_object_dtype, - is_period_dtype, is_scalar, is_string_dtype, is_string_like_dtype, - is_timedelta64_dtype, needs_i8_conversion, pandas_dtype) + is_float_dtype, is_integer_dtype, is_object_dtype, is_period_dtype, + is_scalar, is_string_dtype, is_string_like_dtype, is_timedelta64_dtype, + needs_i8_conversion, pandas_dtype) from .generic import ( ABCDatetimeArray, ABCExtensionArray, ABCGeneric, ABCIndexClass, ABCMultiIndex, ABCSeries, ABCTimedeltaArray) @@ -339,22 +339,6 @@ def notna(obj): 
notnull = notna -def is_null_datelike_scalar(other): - """ test whether the object is a null datelike, e.g. Nat - but guard against passing a non-scalar """ - if other is NaT or other is None: - return True - elif is_scalar(other): - - # a timedelta - if hasattr(other, 'dtype'): - return other.view('i8') == iNaT - elif is_integer(other) and other == iNaT: - return True - return isna(other) - return False - - def _isna_compat(arr, fill_value=np.nan): """ Parameters diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index f88114e1c9e20..721215538af37 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -8,7 +8,7 @@ import numpy as np from pandas._libs import internals as libinternals, lib, tslib, tslibs -from pandas._libs.tslibs import Timedelta, conversion +from pandas._libs.tslibs import Timedelta, conversion, is_null_datetimelike import pandas.compat as compat from pandas.compat import range, zip from pandas.util._validators import validate_bool_kwarg @@ -31,7 +31,7 @@ ABCDataFrame, ABCDatetimeIndex, ABCExtensionArray, ABCIndexClass, ABCSeries) from pandas.core.dtypes.missing import ( - _isna_compat, array_equivalent, is_null_datelike_scalar, isna, notna) + _isna_compat, array_equivalent, isna, notna) import pandas.core.algorithms as algos from pandas.core.arrays import ( @@ -2077,10 +2077,6 @@ def get_values(self, dtype=None): return values return self.values - @property - def asi8(self): - return self.values.view('i8') - class DatetimeBlock(DatetimeLikeBlockMixin, Block): __slots__ = () @@ -2162,7 +2158,7 @@ def _try_coerce_args(self, values, other): if isinstance(other, bool): raise TypeError - elif is_null_datelike_scalar(other): + elif is_null_datetimelike(other): other = tslibs.iNaT elif isinstance(other, (datetime, np.datetime64, date)): other = self._box_func(other) @@ -2175,18 +2171,16 @@ def _try_coerce_args(self, values, other): else: # coercion issues # let higher levels handle - raise 
TypeError + raise TypeError(other) return values, other def _try_coerce_result(self, result): """ reverse of try_coerce_args """ if isinstance(result, np.ndarray): - if result.dtype.kind in ['i', 'f', 'O']: - try: - result = result.astype('M8[ns]') - except ValueError: - pass + if result.dtype.kind in ['i', 'f']: + result = result.astype('M8[ns]') + elif isinstance(result, (np.integer, np.float, np.datetime64)): result = self._box_func(result) return result @@ -2364,8 +2358,7 @@ def _try_coerce_args(self, values, other): # add the tz back other = self._holder(other, dtype=self.dtype) - elif (is_null_datelike_scalar(other) or - (lib.is_scalar(other) and isna(other))): + elif is_null_datetimelike(other): other = tslibs.iNaT elif isinstance(other, self._holder): if other.tz != self.values.tz: @@ -2380,17 +2373,19 @@ def _try_coerce_args(self, values, other): raise ValueError("incompatible or non tz-aware value") other = other.value else: - raise TypeError + raise TypeError(other) return values, other def _try_coerce_result(self, result): """ reverse of try_coerce_args """ if isinstance(result, np.ndarray): - if result.dtype.kind in ['i', 'f', 'O']: + if result.dtype.kind in ['i', 'f']: result = result.astype('M8[ns]') + elif isinstance(result, (np.integer, np.float, np.datetime64)): result = self._box_func(result) + if isinstance(result, np.ndarray): # allow passing of > 1dim if its trivial @@ -2531,20 +2526,16 @@ def _try_coerce_args(self, values, other): if isinstance(other, bool): raise TypeError - elif is_null_datelike_scalar(other): + elif is_null_datetimelike(other): other = tslibs.iNaT - elif isinstance(other, Timedelta): - other = other.value - elif isinstance(other, timedelta): - other = Timedelta(other).value - elif isinstance(other, np.timedelta64): + elif isinstance(other, (timedelta, np.timedelta64)): other = Timedelta(other).value elif hasattr(other, 'dtype') and is_timedelta64_dtype(other): other = other.astype('i8', copy=False).view('i8') else: # 
coercion issues # let higher levels handle - raise TypeError + raise TypeError(other) return values, other @@ -2552,11 +2543,13 @@ def _try_coerce_result(self, result): """ reverse of try_coerce_args / try_operate """ if isinstance(result, np.ndarray): mask = isna(result) - if result.dtype.kind in ['i', 'f', 'O']: + if result.dtype.kind in ['i', 'f']: result = result.astype('m8[ns]') result[mask] = tslibs.iNaT + elif isinstance(result, (np.integer, np.float)): result = self._box_func(result) + return result def should_store(self, value): diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 4ea4531c53c72..db4d3e876dec5 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -16,9 +16,9 @@ from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, Substitution -from pandas.core.dtypes.common import is_scalar +from pandas.core.dtypes.common import is_integer, is_scalar from pandas.core.dtypes.generic import ABCSeries, ABCSparseSeries -from pandas.core.dtypes.missing import is_integer, isna, notna +from pandas.core.dtypes.missing import isna, notna from pandas.core import generic from pandas.core.arrays import SparseArray diff --git a/pandas/tests/tslibs/test_api.py b/pandas/tests/tslibs/test_api.py index fb9355dfed645..de937d1a4c526 100644 --- a/pandas/tests/tslibs/test_api.py +++ b/pandas/tests/tslibs/test_api.py @@ -23,6 +23,7 @@ def test_namespace(): api = ['NaT', 'iNaT', + 'is_null_datetimelike', 'OutOfBoundsDatetime', 'Period', 'IncompatibleFrequency',
Started this branch with the idea of making DatetimeBlock, TimedeltaBlock, DatetimeTZBlock define _try_coerce_result using self._holder._from_sequence (which I still think is worthwhile, cc @TomAugspurger ) and got side-tracked. This ends up just being some cleanup and removal of a redundant method. Tiny perf bump expected from optimizing NaT checks in _libs.missing. Further improvements/cleanups may be possible pending #24607. The object-dtype cases in try_coerce_result are no longer hit following #24606.
https://api.github.com/repos/pandas-dev/pandas/pulls/24619
2019-01-04T16:29:56Z
2019-01-05T14:51:37Z
2019-01-05T14:51:37Z
2019-01-05T15:40:36Z
feature : forces index_type comparaison when asserting that one Dataf…
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d0555bd2e44b1..90878f6406940 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3744,6 +3744,24 @@ def reindex_like(self, other, method=None, copy=True, limit=None, copy=copy, limit=limit, tolerance=tolerance) + if isinstance(self.index, MultiIndex) and isinstance(other.index, + MultiIndex): + types_self = ["{} : {}".format(self.index.names[i], + self.index.get_level_values( + n).dtype) + for i, n in enumerate(self.index.names)] + types_other = ["{} : {}".format(other.index.names[i], + other.index.get_level_values( + n).dtype) + for i, n in enumerate(other.index.names)] + assert types_other == types_self, \ + "columns must have same names and dtypes when reindexing" + + else: + assert type(self.index) == type(other.index), \ + "columns must have same names and dtypes when reindexing" \ + # noqa: E721 + return self.reindex(**d) def drop(self, labels=None, axis=0, index=None, columns=None, level=None, diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py index 1a941c0f0c265..55cab53208fb9 100644 --- a/pandas/tests/util/test_assert_frame_equal.py +++ b/pandas/tests/util/test_assert_frame_equal.py @@ -207,3 +207,42 @@ def test_frame_equal_unicode(df1, df2, msg, by_blocks): # when comparing DataFrames containing differing unicode objects. 
with pytest.raises(AssertionError, match=msg): assert_frame_equal(df1, df2, by_blocks=by_blocks) + + +def test_frame_equal_index_different_type(): + left = DataFrame(data=[[3, 7.45678], [12, 37.45678]], + columns=["one", "two"]) + + right = left.copy() + right.iloc[0, :], right.iloc[1, :] = right.iloc[1, :], right.iloc[0, :] + right.one = right.one.astype(str) + + right = right.set_index("one") + left = left.set_index("one") + + msg = "columns must have same names and dtypes when reindexing" + + with pytest.raises(AssertionError, match=msg): + assert_frame_equal(left, right, check_like=True) + + +def test_frame_equal_index_crossed_different_type(): + honest = DataFrame(data=[["2", 3, 4], ["1", 2, 3], ["6", 6, 8]], + columns=["a_string", "a_int", "some_data"]) + liar = DataFrame(data=[["2", 3, 4], ["1", 2, 3], ["6", 6, 8]], + columns=["a_int", "a_string", "some_data"]) + + honest = honest.set_index(["a_string", "a_int"]) + liar = liar.set_index(["a_string", "a_int"]) + + msg = """columns must have same names and dtypes when reindexing""" + + with pytest.raises(AssertionError, match=msg): + assert_frame_equal(honest, liar, check_like=True) + + +def test_frame_equal_multi_index(): + a = DataFrame(data=[["2", 3, 4], ["1", 2, 3], ["6", 6, 8]], + columns=["a_string", "a_int", "some_data"]) + + _assert_frame_equal_both(a, a.copy())
Until now, when comparing if 2 Dataframes were like each other, reindexing occurred as a inner mechanism. It turns out, if the two indexes are not of the same type, one of the Dataframe was emptied and the assertion always failed. This fix forces the comparison of the index types if this specific case. Example of code that will work with this fix ``` import pandas as pd left = pd.DataFrame(data=[['20181130', 3, 7.45678, '20181129'], ['20181130', 12, 37.45678, '20181129']], columns=["tomorrow", "one", "two", "today"]) left.today = left.today.astype('datetime64') right = left.copy() right.iloc[0, :], right.iloc[1, :] = right.iloc[1, :], right.iloc[0, :] right.one = right.one.astype(str) right = right.set_index("one") left = left.set_index("one") print(left) print(right) pd.testing.assert_frame_equal(left, right, check_like=True) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/24618
2019-01-04T15:56:51Z
2019-02-27T23:57:54Z
null
2019-02-27T23:57:54Z
check azure build
diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py index d3b6a237a97a1..8b08853829f6e 100644 --- a/pandas/tests/test_errors.py +++ b/pandas/tests/test_errors.py @@ -7,6 +7,11 @@ import pandas as pd # noqa +def test_warning_raises(): + import numpy as np + np.dtype("Int64") + + @pytest.mark.parametrize( "exc", ['UnsupportedFunctionCall', 'UnsortedIndexError', 'OutOfBoundsDatetime',
This should fail the numpydev build on azure.
https://api.github.com/repos/pandas-dev/pandas/pulls/24616
2019-01-04T14:54:42Z
2019-01-17T15:11:13Z
null
2019-01-17T15:11:18Z
REF/TST: Add more pytest idiom to test_to_html.py
diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py index 889b903088afa..554cfd306e2a7 100644 --- a/pandas/tests/io/formats/test_to_html.py +++ b/pandas/tests/io/formats/test_to_html.py @@ -38,564 +38,565 @@ def expected_html(datapath, name): return html.rstrip() -class TestToHTML(object): - - def test_to_html_with_col_space(self): - def check_with_width(df, col_space): - # check that col_space affects HTML generation - # and be very brittle about it. - html = df.to_html(col_space=col_space) - hdrs = [x for x in html.split(r"\n") if re.search(r"<th[>\s]", x)] - assert len(hdrs) > 0 - for h in hdrs: - assert "min-width" in h - assert str(col_space) in h - - df = DataFrame(np.random.random(size=(1, 3))) - - check_with_width(df, 30) - check_with_width(df, 50) - - def test_to_html_with_empty_string_label(self): - # GH 3547, to_html regards empty string labels as repeated labels - data = {'c1': ['a', 'b'], 'c2': ['a', ''], 'data': [1, 2]} - df = DataFrame(data).set_index(['c1', 'c2']) - result = df.to_html() - assert "rowspan" not in result - - def test_to_html_unicode(self, datapath): - df = DataFrame({u('\u03c3'): np.arange(10.)}) - expected = expected_html(datapath, 'unicode_1') - assert df.to_html() == expected - df = DataFrame({'A': [u('\u03c3')]}) - expected = expected_html(datapath, 'unicode_2') - assert df.to_html() == expected - - def test_to_html_decimal(self, datapath): - # GH 12031 - df = DataFrame({'A': [6.0, 3.1, 2.2]}) - result = df.to_html(decimal=',') - expected = expected_html(datapath, 'gh12031_expected_output') - assert result == expected - - def test_to_html_escaped(self, datapath): - a = 'str<ing1 &amp;' - b = 'stri>ng2 &amp;' - - test_dict = {'co<l1': {a: "<type 'str'>", - b: "<type 'str'>"}, - 'co>l2': {a: "<type 'str'>", - b: "<type 'str'>"}} - result = DataFrame(test_dict).to_html() - expected = expected_html(datapath, 'escaped') - assert result == expected - - def test_to_html_escape_disabled(self, 
datapath): - a = 'str<ing1 &amp;' - b = 'stri>ng2 &amp;' - - test_dict = {'co<l1': {a: "<b>bold</b>", - b: "<b>bold</b>"}, - 'co>l2': {a: "<b>bold</b>", - b: "<b>bold</b>"}} - result = DataFrame(test_dict).to_html(escape=False) - expected = expected_html(datapath, 'escape_disabled') - assert result == expected - - def test_to_html_multiindex_index_false(self, datapath): - # GH 8452 - df = DataFrame({ - 'a': range(2), - 'b': range(3, 5), - 'c': range(5, 7), - 'd': range(3, 5) - }) - df.columns = MultiIndex.from_product([['a', 'b'], ['c', 'd']]) - result = df.to_html(index=False) - expected = expected_html(datapath, 'gh8452_expected_output') - assert result == expected - +@pytest.fixture(params=['mixed', 'empty']) +def biggie_df_fixture(request): + """Fixture for a big mixed Dataframe and an empty Dataframe""" + if request.param == 'mixed': + df = DataFrame({'A': np.random.randn(200), + 'B': tm.makeStringIndex(200)}, + index=lrange(200)) + df.loc[:20, 'A'] = np.nan + df.loc[:20, 'B'] = np.nan + return df + elif request.param == 'empty': + df = DataFrame(index=np.arange(200)) + return df + + +@pytest.fixture(params=fmt._VALID_JUSTIFY_PARAMETERS) +def justify(request): + return request.param + + +@pytest.mark.parametrize('col_space', [30, 50]) +def test_to_html_with_col_space(col_space): + df = DataFrame(np.random.random(size=(1, 3))) + # check that col_space affects HTML generation + # and be very brittle about it. 
+ result = df.to_html(col_space=col_space) + hdrs = [x for x in result.split(r"\n") if re.search(r"<th[>\s]", x)] + assert len(hdrs) > 0 + for h in hdrs: + assert "min-width" in h + assert str(col_space) in h + + +def test_to_html_with_empty_string_label(): + # GH 3547, to_html regards empty string labels as repeated labels + data = {'c1': ['a', 'b'], 'c2': ['a', ''], 'data': [1, 2]} + df = DataFrame(data).set_index(['c1', 'c2']) + result = df.to_html() + assert "rowspan" not in result + + +@pytest.mark.parametrize('df,expected', [ + (DataFrame({u('\u03c3'): np.arange(10.)}), 'unicode_1'), + (DataFrame({'A': [u('\u03c3')]}), 'unicode_2') +]) +def test_to_html_unicode(df, expected, datapath): + expected = expected_html(datapath, expected) + result = df.to_html() + assert result == expected + + +def test_to_html_decimal(datapath): + # GH 12031 + df = DataFrame({'A': [6.0, 3.1, 2.2]}) + result = df.to_html(decimal=',') + expected = expected_html(datapath, 'gh12031_expected_output') + assert result == expected + + +@pytest.mark.parametrize('kwargs,string,expected', [ + (dict(), "<type 'str'>", 'escaped'), + (dict(escape=False), "<b>bold</b>", 'escape_disabled') +]) +def test_to_html_escaped(kwargs, string, expected, datapath): + a = 'str<ing1 &amp;' + b = 'stri>ng2 &amp;' + + test_dict = {'co<l1': {a: string, + b: string}, + 'co>l2': {a: string, + b: string}} + result = DataFrame(test_dict).to_html(**kwargs) + expected = expected_html(datapath, expected) + assert result == expected + + +@pytest.mark.parametrize('index_is_named', [True, False]) +def test_to_html_multiindex_index_false(index_is_named, datapath): + # GH 8452 + df = DataFrame({ + 'a': range(2), + 'b': range(3, 5), + 'c': range(5, 7), + 'd': range(3, 5) + }) + df.columns = MultiIndex.from_product([['a', 'b'], ['c', 'd']]) + if index_is_named: df.index = Index(df.index.values, name='idx') - result = df.to_html(index=False) - assert result == expected - - def 
test_to_html_multiindex_sparsify_false_multi_sparse(self, datapath): - with option_context('display.multi_sparse', False): - index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]], - names=['foo', None]) - - df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index) - result = df.to_html() - expected = expected_html( - datapath, 'multiindex_sparsify_false_multi_sparse_1') - assert result == expected - - df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], - columns=index[::2], index=index) - result = df.to_html() - expected = expected_html( - datapath, 'multiindex_sparsify_false_multi_sparse_2') - assert result == expected - - def test_to_html_multiindex_sparsify(self, datapath): - index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]], - names=['foo', None]) - - df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index) - result = df.to_html() - expected = expected_html(datapath, 'multiindex_sparsify_1') - assert result == expected - - df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], columns=index[::2], - index=index) + result = df.to_html(index=False) + expected = expected_html(datapath, 'gh8452_expected_output') + assert result == expected + + +@pytest.mark.parametrize('multi_sparse,expected', [ + (False, 'multiindex_sparsify_false_multi_sparse_1'), + (False, 'multiindex_sparsify_false_multi_sparse_2'), + (True, 'multiindex_sparsify_1'), + (True, 'multiindex_sparsify_2') +]) +def test_to_html_multiindex_sparsify(multi_sparse, expected, datapath): + index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]], + names=['foo', None]) + df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index) + if expected.endswith('2'): + df.columns = index[::2] + with option_context('display.multi_sparse', multi_sparse): result = df.to_html() - expected = expected_html(datapath, 'multiindex_sparsify_2') - assert result == expected - - def test_to_html_multiindex_odd_even_truncate(self, datapath): - # GH 14882 - Issue on truncation with odd length DataFrame - mi = 
MultiIndex.from_product([[100, 200, 300], - [10, 20, 30], - [1, 2, 3, 4, 5, 6, 7]], - names=['a', 'b', 'c']) - df = DataFrame({'n': range(len(mi))}, index=mi) - result = df.to_html(max_rows=60) - expected = expected_html(datapath, 'gh14882_expected_output_1') - assert result == expected - - # Test that ... appears in a middle level - result = df.to_html(max_rows=56) - expected = expected_html(datapath, 'gh14882_expected_output_2') - assert result == expected - - def test_to_html_index_formatter(self, datapath): - df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], columns=['foo', None], - index=lrange(4)) - - f = lambda x: 'abcd' [x] - result = df.to_html(formatters={'__index__': f}) - expected = expected_html(datapath, 'index_formatter') - assert result == expected - - def test_to_html_datetime64_monthformatter(self, datapath): - months = [datetime(2016, 1, 1), datetime(2016, 2, 2)] - x = DataFrame({'months': months}) - - def format_func(x): - return x.strftime('%Y-%m') - result = x.to_html(formatters={'months': format_func}) - expected = expected_html(datapath, 'datetime64_monthformatter') - assert result == expected - - def test_to_html_datetime64_hourformatter(self, datapath): - - x = DataFrame({'hod': pd.to_datetime(['10:10:10.100', '12:12:12.120'], - format='%H:%M:%S.%f')}) - - def format_func(x): - return x.strftime('%H:%M') - result = x.to_html(formatters={'hod': format_func}) - expected = expected_html(datapath, 'datetime64_hourformatter') - assert result == expected - - def test_to_html_regression_GH6098(self): - df = DataFrame({ - u('clé1'): [u('a'), u('a'), u('b'), u('b'), u('a')], - u('clé2'): [u('1er'), u('2ème'), u('1er'), u('2ème'), u('1er')], - 'données1': np.random.randn(5), - 'données2': np.random.randn(5)}) - - # it works - df.pivot_table(index=[u('clé1')], columns=[u('clé2')])._repr_html_() - - def test_to_html_truncate(self, datapath): - index = pd.date_range(start='20010101', freq='D', periods=20) - df = DataFrame(index=index, columns=range(20)) 
- result = df.to_html(max_rows=8, max_cols=4) - expected = expected_html(datapath, 'truncate') - assert result == expected - - def test_to_html_truncate_multi_index(self, datapath): - arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], - ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']] - df = DataFrame(index=arrays, columns=arrays) - result = df.to_html(max_rows=7, max_cols=7) - expected = expected_html(datapath, 'truncate_multi_index') - assert result == expected - - def test_to_html_truncate_multi_index_sparse_off(self, datapath): - arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], - ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']] - df = DataFrame(index=arrays, columns=arrays) - result = df.to_html(max_rows=7, max_cols=7, sparsify=False) - expected = expected_html(datapath, 'truncate_multi_index_sparse_off') - assert result == expected - - def test_to_html_border(self): - df = DataFrame({'A': [1, 2]}) - result = df.to_html() - assert 'border="1"' in result - - def test_to_html_border_option(self): - df = DataFrame({'A': [1, 2]}) - with option_context('display.html.border', 0): - result = df.to_html() - assert 'border="0"' in result - assert 'border="0"' in df._repr_html_() - - def test_to_html_border_zero(self): - df = DataFrame({'A': [1, 2]}) - result = df.to_html(border=0) - assert 'border="0"' in result - - def test_display_option_warning(self): - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - pd.options.html.border - - def test_to_html(self): - # big mixed - biggie = DataFrame({'A': np.random.randn(200), - 'B': tm.makeStringIndex(200)}, - index=lrange(200)) - - biggie.loc[:20, 'A'] = np.nan - biggie.loc[:20, 'B'] = np.nan - s = biggie.to_html() - - buf = StringIO() - retval = biggie.to_html(buf=buf) - assert retval is None - assert buf.getvalue() == s - - assert isinstance(s, compat.string_types) - - biggie.to_html(columns=['B', 'A'], col_space=17) - biggie.to_html(columns=['B', 
'A'], - formatters={'A': lambda x: '{x:.1f}'.format(x=x)}) - - biggie.to_html(columns=['B', 'A'], float_format=str) - biggie.to_html(columns=['B', 'A'], col_space=12, float_format=str) - - frame = DataFrame(index=np.arange(200)) - frame.to_html() - - def test_to_html_filename(self): - biggie = DataFrame({'A': np.random.randn(200), - 'B': tm.makeStringIndex(200)}, - index=lrange(200)) - - biggie.loc[:20, 'A'] = np.nan - biggie.loc[:20, 'B'] = np.nan - with tm.ensure_clean('test.html') as path: - biggie.to_html(path) - with open(path, 'r') as f: - s = biggie.to_html() - s2 = f.read() - assert s == s2 - - frame = DataFrame(index=np.arange(200)) - with tm.ensure_clean('test.html') as path: - frame.to_html(path) - with open(path, 'r') as f: - assert frame.to_html() == f.read() - - def test_to_html_with_no_bold(self): - x = DataFrame({'x': np.random.randn(5)}) - ashtml = x.to_html(bold_rows=False) - assert '<strong' not in ashtml[ashtml.find("</thead>")] - - def test_to_html_columns_arg(self): - frame = DataFrame(tm.getSeriesData()) - result = frame.to_html(columns=['A']) - assert '<th>B</th>' not in result - - def test_to_html_multiindex(self, datapath): - columns = MultiIndex.from_tuples(list(zip(np.arange(2).repeat(2), - np.mod(lrange(4), 2))), - names=['CL0', 'CL1']) - df = DataFrame([list('abcd'), list('efgh')], columns=columns) - result = df.to_html(justify='left') - expected = expected_html(datapath, 'multiindex_1') - assert result == expected - - columns = MultiIndex.from_tuples(list(zip( - range(4), np.mod( - lrange(4), 2)))) - df = DataFrame([list('abcd'), list('efgh')], columns=columns) - - result = df.to_html(justify='right') - expected = expected_html(datapath, 'multiindex_2') - assert result == expected - - @pytest.mark.parametrize("justify", fmt._VALID_JUSTIFY_PARAMETERS) - def test_to_html_justify(self, justify, datapath): - df = DataFrame({'A': [6, 30000, 2], - 'B': [1, 2, 70000], - 'C': [223442, 0, 1]}, - columns=['A', 'B', 'C']) - result = 
df.to_html(justify=justify) - expected = expected_html(datapath, 'justify').format(justify=justify) - assert result == expected - - @pytest.mark.parametrize("justify", ["super-right", "small-left", - "noinherit", "tiny", "pandas"]) - def test_to_html_invalid_justify(self, justify): - # GH 17527 - df = DataFrame() - msg = "Invalid value for justify parameter" - - with pytest.raises(ValueError, match=msg): - df.to_html(justify=justify) - - def test_to_html_index(self, datapath): - index = ['foo', 'bar', 'baz'] - df = DataFrame({'A': [1, 2, 3], - 'B': [1.2, 3.4, 5.6], - 'C': ['one', 'two', np.nan]}, - columns=['A', 'B', 'C'], - index=index) - expected_with_index = expected_html(datapath, 'index_1') - assert df.to_html() == expected_with_index - - expected_without_index = expected_html(datapath, 'index_2') - result = df.to_html(index=False) - for i in index: - assert i not in result - assert result == expected_without_index - df.index = Index(['foo', 'bar', 'baz'], name='idx') - expected_with_index = expected_html(datapath, 'index_3') - assert df.to_html() == expected_with_index - assert df.to_html(index=False) == expected_without_index - - tuples = [('foo', 'car'), ('foo', 'bike'), ('bar', 'car')] - df.index = MultiIndex.from_tuples(tuples) - - expected_with_index = expected_html(datapath, 'index_4') - assert df.to_html() == expected_with_index - - result = df.to_html(index=False) - for i in ['foo', 'bar', 'car', 'bike']: - assert i not in result - # must be the same result as normal index - assert result == expected_without_index - - df.index = MultiIndex.from_tuples(tuples, names=['idx1', 'idx2']) - expected_with_index = expected_html(datapath, 'index_5') - assert df.to_html() == expected_with_index - assert df.to_html(index=False) == expected_without_index - - def test_to_html_with_classes(self, datapath): - df = DataFrame() - result = df.to_html(classes="sortable draggable") - expected = expected_html(datapath, 'with_classes') - assert result == expected - - 
result = df.to_html(classes=["sortable", "draggable"]) - assert result == expected - - def test_to_html_no_index_max_rows(self, datapath): - # GH 14998 - df = DataFrame({"A": [1, 2, 3, 4]}) - result = df.to_html(index=False, max_rows=1) - expected = expected_html(datapath, 'gh14998_expected_output') - assert result == expected - - def test_to_html_multiindex_max_cols(self, datapath): - # GH 6131 - index = MultiIndex(levels=[['ba', 'bb', 'bc'], ['ca', 'cb', 'cc']], - codes=[[0, 1, 2], [0, 1, 2]], - names=['b', 'c']) - columns = MultiIndex(levels=[['d'], ['aa', 'ab', 'ac']], - codes=[[0, 0, 0], [0, 1, 2]], - names=[None, 'a']) - data = np.array( - [[1., np.nan, np.nan], [np.nan, 2., np.nan], [np.nan, np.nan, 3.]]) - df = DataFrame(data, index, columns) - result = df.to_html(max_cols=2) - expected = expected_html(datapath, 'gh6131_expected_output') - assert result == expected - - def test_to_html_multi_indexes_index_false(self, datapath): - # GH 22579 - df = DataFrame({'a': range(10), 'b': range(10, 20), 'c': range(10, 20), - 'd': range(10, 20)}) - df.columns = MultiIndex.from_product([['a', 'b'], ['c', 'd']]) - df.index = MultiIndex.from_product([['a', 'b'], - ['c', 'd', 'e', 'f', 'g']]) - result = df.to_html(index=False) - expected = expected_html(datapath, 'gh22579_expected_output') - assert result == expected - - @pytest.mark.parametrize('index_names', [True, False]) - @pytest.mark.parametrize('header', [True, False]) - @pytest.mark.parametrize('index', [True, False]) - @pytest.mark.parametrize('column_index, column_type', [ - (Index([0, 1]), 'unnamed_standard'), - (Index([0, 1], name='columns.name'), 'named_standard'), - (MultiIndex.from_product([['a'], ['b', 'c']]), 'unnamed_multi'), - (MultiIndex.from_product( - [['a'], ['b', 'c']], names=['columns.name.0', - 'columns.name.1']), 'named_multi') - ]) - @pytest.mark.parametrize('row_index, row_type', [ - (Index([0, 1]), 'unnamed_standard'), - (Index([0, 1], name='index.name'), 'named_standard'), - 
(MultiIndex.from_product([['a'], ['b', 'c']]), 'unnamed_multi'), - (MultiIndex.from_product( - [['a'], ['b', 'c']], names=['index.name.0', - 'index.name.1']), 'named_multi') - ]) - def test_to_html_basic_alignment( - self, datapath, row_index, row_type, column_index, column_type, - index, header, index_names): - # GH 22747, GH 22579 - df = DataFrame(np.zeros((2, 2), dtype=int), - index=row_index, columns=column_index) - result = df.to_html( - index=index, header=header, index_names=index_names) - - if not index: - row_type = 'none' - elif not index_names and row_type.startswith('named'): - row_type = 'un' + row_type - - if not header: - column_type = 'none' - elif not index_names and column_type.startswith('named'): - column_type = 'un' + column_type - - filename = 'index_' + row_type + '_columns_' + column_type - expected = expected_html(datapath, filename) - assert result == expected - - @pytest.mark.parametrize('index_names', [True, False]) - @pytest.mark.parametrize('header', [True, False]) - @pytest.mark.parametrize('index', [True, False]) - @pytest.mark.parametrize('column_index, column_type', [ - (Index(np.arange(8)), 'unnamed_standard'), - (Index(np.arange(8), name='columns.name'), 'named_standard'), - (MultiIndex.from_product( - [['a', 'b'], ['c', 'd'], ['e', 'f']]), 'unnamed_multi'), - (MultiIndex.from_product( - [['a', 'b'], ['c', 'd'], ['e', 'f']], names=['foo', None, 'baz']), - 'named_multi') - ]) - @pytest.mark.parametrize('row_index, row_type', [ - (Index(np.arange(8)), 'unnamed_standard'), - (Index(np.arange(8), name='index.name'), 'named_standard'), - (MultiIndex.from_product( - [['a', 'b'], ['c', 'd'], ['e', 'f']]), 'unnamed_multi'), - (MultiIndex.from_product( - [['a', 'b'], ['c', 'd'], ['e', 'f']], names=['foo', None, 'baz']), - 'named_multi') - ]) - def test_to_html_alignment_with_truncation( - self, datapath, row_index, row_type, column_index, column_type, - index, header, index_names): - # GH 22747, GH 22579 - df = 
DataFrame(np.arange(64).reshape(8, 8), - index=row_index, columns=column_index) - result = df.to_html( - max_rows=4, max_cols=4, - index=index, header=header, index_names=index_names) - - if not index: - row_type = 'none' - elif not index_names and row_type.startswith('named'): - row_type = 'un' + row_type - - if not header: - column_type = 'none' - elif not index_names and column_type.startswith('named'): - column_type = 'un' + column_type - - filename = 'trunc_df_index_' + row_type + '_columns_' + column_type - expected = expected_html(datapath, filename) - assert result == expected - - @pytest.mark.parametrize('index', [False, 0]) - def test_to_html_truncation_index_false_max_rows(self, datapath, index): - # GH 15019 - data = [[1.764052, 0.400157], - [0.978738, 2.240893], - [1.867558, -0.977278], - [0.950088, -0.151357], - [-0.103219, 0.410599]] - df = DataFrame(data) - result = df.to_html(max_rows=4, index=index) - expected = expected_html(datapath, 'gh15019_expected_output') - assert result == expected - - @pytest.mark.parametrize('index', [False, 0]) - @pytest.mark.parametrize('col_index_named, expected_output', [ - (False, 'gh22783_expected_output'), - (True, 'gh22783_named_columns_index') - ]) - def test_to_html_truncation_index_false_max_cols( - self, datapath, index, col_index_named, expected_output): - # GH 22783 - data = [[1.764052, 0.400157, 0.978738, 2.240893, 1.867558], - [-0.977278, 0.950088, -0.151357, -0.103219, 0.410599]] - df = DataFrame(data) - if col_index_named: - df.columns.rename('columns.name', inplace=True) - result = df.to_html(max_cols=4, index=index) - expected = expected_html(datapath, expected_output) - assert result == expected - - def test_to_html_notebook_has_style(self): - df = DataFrame({"A": [1, 2, 3]}) - result = df.to_html(notebook=True) + expected = expected_html(datapath, expected) + assert result == expected + + +@pytest.mark.parametrize('max_rows,expected', [ + (60, 'gh14882_expected_output_1'), + + # Test that ... 
appears in a middle level + (56, 'gh14882_expected_output_2') +]) +def test_to_html_multiindex_odd_even_truncate(max_rows, expected, datapath): + # GH 14882 - Issue on truncation with odd length DataFrame + index = MultiIndex.from_product([[100, 200, 300], + [10, 20, 30], + [1, 2, 3, 4, 5, 6, 7]], + names=['a', 'b', 'c']) + df = DataFrame({'n': range(len(index))}, index=index) + result = df.to_html(max_rows=max_rows) + expected = expected_html(datapath, expected) + assert result == expected + + +@pytest.mark.parametrize('df,formatters,expected', [ + (DataFrame( + [[0, 1], [2, 3], [4, 5], [6, 7]], + columns=['foo', None], index=lrange(4)), + {'__index__': lambda x: 'abcd' [x]}, + 'index_formatter'), + + (DataFrame( + {'months': [datetime(2016, 1, 1), datetime(2016, 2, 2)]}), + {'months': lambda x: x.strftime('%Y-%m')}, + 'datetime64_monthformatter'), + + (DataFrame({'hod': pd.to_datetime(['10:10:10.100', '12:12:12.120'], + format='%H:%M:%S.%f')}), + {'hod': lambda x: x.strftime('%H:%M')}, + 'datetime64_hourformatter') +]) +def test_to_html_formatters(df, formatters, expected, datapath): + expected = expected_html(datapath, expected) + result = df.to_html(formatters=formatters) + assert result == expected + + +def test_to_html_regression_GH6098(): + df = DataFrame({ + u('clé1'): [u('a'), u('a'), u('b'), u('b'), u('a')], + u('clé2'): [u('1er'), u('2ème'), u('1er'), u('2ème'), u('1er')], + 'données1': np.random.randn(5), + 'données2': np.random.randn(5)}) + + # it works + df.pivot_table(index=[u('clé1')], columns=[u('clé2')])._repr_html_() + + +def test_to_html_truncate(datapath): + index = pd.date_range(start='20010101', freq='D', periods=20) + df = DataFrame(index=index, columns=range(20)) + result = df.to_html(max_rows=8, max_cols=4) + expected = expected_html(datapath, 'truncate') + assert result == expected + + +@pytest.mark.parametrize('sparsify,expected', [ + (True, 'truncate_multi_index'), + (False, 'truncate_multi_index_sparse_off') +]) +def 
test_to_html_truncate_multi_index(sparsify, expected, datapath): + arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], + ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']] + df = DataFrame(index=arrays, columns=arrays) + result = df.to_html(max_rows=7, max_cols=7, sparsify=sparsify) + expected = expected_html(datapath, expected) + assert result == expected + + +@pytest.mark.parametrize('option,result,expected', [ + (None, lambda df: df.to_html(), '1'), + (None, lambda df: df.to_html(border=0), '0'), + (0, lambda df: df.to_html(), '0'), + (0, lambda df: df._repr_html_(), '0'), +]) +def test_to_html_border(option, result, expected): + df = DataFrame({'A': [1, 2]}) + if option is None: + result = result(df) + else: + with option_context('display.html.border', option): + result = result(df) + expected = 'border="{}"'.format(expected) + assert expected in result + + +def test_display_option_warning(): + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + pd.options.html.border + + +@pytest.mark.parametrize('biggie_df_fixture', ['mixed'], indirect=True) +def test_to_html(biggie_df_fixture): + # TODO: split this test + df = biggie_df_fixture + s = df.to_html() + + buf = StringIO() + retval = df.to_html(buf=buf) + assert retval is None + assert buf.getvalue() == s + + assert isinstance(s, compat.string_types) + + df.to_html(columns=['B', 'A'], col_space=17) + df.to_html(columns=['B', 'A'], + formatters={'A': lambda x: '{x:.1f}'.format(x=x)}) + + df.to_html(columns=['B', 'A'], float_format=str) + df.to_html(columns=['B', 'A'], col_space=12, float_format=str) + + +@pytest.mark.parametrize('biggie_df_fixture', ['empty'], indirect=True) +def test_to_html_empty_dataframe(biggie_df_fixture): + df = biggie_df_fixture + df.to_html() + + +def test_to_html_filename(biggie_df_fixture, tmpdir): + df = biggie_df_fixture + expected = df.to_html() + path = tmpdir.join('test.html') + df.to_html(path) + result = path.read() + assert result == 
expected + + +def test_to_html_with_no_bold(): + df = DataFrame({'x': np.random.randn(5)}) + html = df.to_html(bold_rows=False) + result = html[html.find("</thead>")] + assert '<strong' not in result + + +def test_to_html_columns_arg(): + df = DataFrame(tm.getSeriesData()) + result = df.to_html(columns=['A']) + assert '<th>B</th>' not in result + + +@pytest.mark.parametrize('columns,justify,expected', [ + (MultiIndex.from_tuples( + list(zip(np.arange(2).repeat(2), np.mod(lrange(4), 2))), + names=['CL0', 'CL1']), + 'left', + 'multiindex_1'), + + (MultiIndex.from_tuples( + list(zip(range(4), np.mod(lrange(4), 2)))), + 'right', + 'multiindex_2') +]) +def test_to_html_multiindex(columns, justify, expected, datapath): + df = DataFrame([list('abcd'), list('efgh')], columns=columns) + result = df.to_html(justify=justify) + expected = expected_html(datapath, expected) + assert result == expected + + +def test_to_html_justify(justify, datapath): + df = DataFrame({'A': [6, 30000, 2], + 'B': [1, 2, 70000], + 'C': [223442, 0, 1]}, + columns=['A', 'B', 'C']) + result = df.to_html(justify=justify) + expected = expected_html(datapath, 'justify').format(justify=justify) + assert result == expected + + +@pytest.mark.parametrize("justify", ["super-right", "small-left", + "noinherit", "tiny", "pandas"]) +def test_to_html_invalid_justify(justify): + # GH 17527 + df = DataFrame() + msg = "Invalid value for justify parameter" + + with pytest.raises(ValueError, match=msg): + df.to_html(justify=justify) + + +def test_to_html_index(datapath): + # TODO: split this test + index = ['foo', 'bar', 'baz'] + df = DataFrame({'A': [1, 2, 3], + 'B': [1.2, 3.4, 5.6], + 'C': ['one', 'two', np.nan]}, + columns=['A', 'B', 'C'], + index=index) + expected_with_index = expected_html(datapath, 'index_1') + assert df.to_html() == expected_with_index + + expected_without_index = expected_html(datapath, 'index_2') + result = df.to_html(index=False) + for i in index: + assert i not in result + assert result == 
expected_without_index + df.index = Index(['foo', 'bar', 'baz'], name='idx') + expected_with_index = expected_html(datapath, 'index_3') + assert df.to_html() == expected_with_index + assert df.to_html(index=False) == expected_without_index + + tuples = [('foo', 'car'), ('foo', 'bike'), ('bar', 'car')] + df.index = MultiIndex.from_tuples(tuples) + + expected_with_index = expected_html(datapath, 'index_4') + assert df.to_html() == expected_with_index + + result = df.to_html(index=False) + for i in ['foo', 'bar', 'car', 'bike']: + assert i not in result + # must be the same result as normal index + assert result == expected_without_index + + df.index = MultiIndex.from_tuples(tuples, names=['idx1', 'idx2']) + expected_with_index = expected_html(datapath, 'index_5') + assert df.to_html() == expected_with_index + assert df.to_html(index=False) == expected_without_index + + +@pytest.mark.parametrize('classes', [ + "sortable draggable", + ["sortable", "draggable"] +]) +def test_to_html_with_classes(classes, datapath): + df = DataFrame() + expected = expected_html(datapath, 'with_classes') + result = df.to_html(classes=classes) + assert result == expected + + +def test_to_html_no_index_max_rows(datapath): + # GH 14998 + df = DataFrame({"A": [1, 2, 3, 4]}) + result = df.to_html(index=False, max_rows=1) + expected = expected_html(datapath, 'gh14998_expected_output') + assert result == expected + + +def test_to_html_multiindex_max_cols(datapath): + # GH 6131 + index = MultiIndex(levels=[['ba', 'bb', 'bc'], ['ca', 'cb', 'cc']], + codes=[[0, 1, 2], [0, 1, 2]], + names=['b', 'c']) + columns = MultiIndex(levels=[['d'], ['aa', 'ab', 'ac']], + codes=[[0, 0, 0], [0, 1, 2]], + names=[None, 'a']) + data = np.array( + [[1., np.nan, np.nan], [np.nan, 2., np.nan], [np.nan, np.nan, 3.]]) + df = DataFrame(data, index, columns) + result = df.to_html(max_cols=2) + expected = expected_html(datapath, 'gh6131_expected_output') + assert result == expected + + +def 
test_to_html_multi_indexes_index_false(datapath): + # GH 22579 + df = DataFrame({'a': range(10), 'b': range(10, 20), 'c': range(10, 20), + 'd': range(10, 20)}) + df.columns = MultiIndex.from_product([['a', 'b'], ['c', 'd']]) + df.index = MultiIndex.from_product([['a', 'b'], + ['c', 'd', 'e', 'f', 'g']]) + result = df.to_html(index=False) + expected = expected_html(datapath, 'gh22579_expected_output') + assert result == expected + + +@pytest.mark.parametrize('index_names', [True, False]) +@pytest.mark.parametrize('header', [True, False]) +@pytest.mark.parametrize('index', [True, False]) +@pytest.mark.parametrize('column_index, column_type', [ + (Index([0, 1]), 'unnamed_standard'), + (Index([0, 1], name='columns.name'), 'named_standard'), + (MultiIndex.from_product([['a'], ['b', 'c']]), 'unnamed_multi'), + (MultiIndex.from_product( + [['a'], ['b', 'c']], names=['columns.name.0', + 'columns.name.1']), 'named_multi') +]) +@pytest.mark.parametrize('row_index, row_type', [ + (Index([0, 1]), 'unnamed_standard'), + (Index([0, 1], name='index.name'), 'named_standard'), + (MultiIndex.from_product([['a'], ['b', 'c']]), 'unnamed_multi'), + (MultiIndex.from_product( + [['a'], ['b', 'c']], names=['index.name.0', + 'index.name.1']), 'named_multi') +]) +def test_to_html_basic_alignment( + datapath, row_index, row_type, column_index, column_type, + index, header, index_names): + # GH 22747, GH 22579 + df = DataFrame(np.zeros((2, 2), dtype=int), + index=row_index, columns=column_index) + result = df.to_html( + index=index, header=header, index_names=index_names) + + if not index: + row_type = 'none' + elif not index_names and row_type.startswith('named'): + row_type = 'un' + row_type + + if not header: + column_type = 'none' + elif not index_names and column_type.startswith('named'): + column_type = 'un' + column_type + + filename = 'index_' + row_type + '_columns_' + column_type + expected = expected_html(datapath, filename) + assert result == expected + + 
+@pytest.mark.parametrize('index_names', [True, False]) +@pytest.mark.parametrize('header', [True, False]) +@pytest.mark.parametrize('index', [True, False]) +@pytest.mark.parametrize('column_index, column_type', [ + (Index(np.arange(8)), 'unnamed_standard'), + (Index(np.arange(8), name='columns.name'), 'named_standard'), + (MultiIndex.from_product( + [['a', 'b'], ['c', 'd'], ['e', 'f']]), 'unnamed_multi'), + (MultiIndex.from_product( + [['a', 'b'], ['c', 'd'], ['e', 'f']], names=['foo', None, 'baz']), + 'named_multi') +]) +@pytest.mark.parametrize('row_index, row_type', [ + (Index(np.arange(8)), 'unnamed_standard'), + (Index(np.arange(8), name='index.name'), 'named_standard'), + (MultiIndex.from_product( + [['a', 'b'], ['c', 'd'], ['e', 'f']]), 'unnamed_multi'), + (MultiIndex.from_product( + [['a', 'b'], ['c', 'd'], ['e', 'f']], names=['foo', None, 'baz']), + 'named_multi') +]) +def test_to_html_alignment_with_truncation( + datapath, row_index, row_type, column_index, column_type, + index, header, index_names): + # GH 22747, GH 22579 + df = DataFrame(np.arange(64).reshape(8, 8), + index=row_index, columns=column_index) + result = df.to_html( + max_rows=4, max_cols=4, + index=index, header=header, index_names=index_names) + + if not index: + row_type = 'none' + elif not index_names and row_type.startswith('named'): + row_type = 'un' + row_type + + if not header: + column_type = 'none' + elif not index_names and column_type.startswith('named'): + column_type = 'un' + column_type + + filename = 'trunc_df_index_' + row_type + '_columns_' + column_type + expected = expected_html(datapath, filename) + assert result == expected + + +@pytest.mark.parametrize('index', [False, 0]) +def test_to_html_truncation_index_false_max_rows(datapath, index): + # GH 15019 + data = [[1.764052, 0.400157], + [0.978738, 2.240893], + [1.867558, -0.977278], + [0.950088, -0.151357], + [-0.103219, 0.410599]] + df = DataFrame(data) + result = df.to_html(max_rows=4, index=index) + expected = 
expected_html(datapath, 'gh15019_expected_output') + assert result == expected + + +@pytest.mark.parametrize('index', [False, 0]) +@pytest.mark.parametrize('col_index_named, expected_output', [ + (False, 'gh22783_expected_output'), + (True, 'gh22783_named_columns_index') +]) +def test_to_html_truncation_index_false_max_cols( + datapath, index, col_index_named, expected_output): + # GH 22783 + data = [[1.764052, 0.400157, 0.978738, 2.240893, 1.867558], + [-0.977278, 0.950088, -0.151357, -0.103219, 0.410599]] + df = DataFrame(data) + if col_index_named: + df.columns.rename('columns.name', inplace=True) + result = df.to_html(max_cols=4, index=index) + expected = expected_html(datapath, expected_output) + assert result == expected + + +@pytest.mark.parametrize('notebook', [True, False]) +def test_to_html_notebook_has_style(notebook): + df = DataFrame({"A": [1, 2, 3]}) + result = df.to_html(notebook=notebook) + + if notebook: assert "tbody tr th:only-of-type" in result assert "vertical-align: middle;" in result assert "thead th" in result - - def test_to_html_notebook_has_no_style(self): - df = DataFrame({"A": [1, 2, 3]}) - result = df.to_html() + else: assert "tbody tr th:only-of-type" not in result assert "vertical-align: middle;" not in result assert "thead th" not in result - def test_to_html_with_index_names_false(self): - # GH 16493 - df = DataFrame({"A": [1, 2]}, index=Index(['a', 'b'], - name='myindexname')) - result = df.to_html(index_names=False) - assert 'myindexname' not in result - - def test_to_html_with_id(self): - # GH 8496 - df = DataFrame({"A": [1, 2]}, index=Index(['a', 'b'], - name='myindexname')) - result = df.to_html(index_names=False, table_id="TEST_ID") - assert ' id="TEST_ID"' in result - - def test_to_html_float_format_no_fixed_width(self, datapath): - - # GH 21625 - df = DataFrame({'x': [0.19999]}) - expected = expected_html(datapath, 'gh21625_expected_output') - assert df.to_html(float_format='%.3f') == expected - - # GH 22270 - df = 
DataFrame({'x': [100.0]}) - expected = expected_html(datapath, 'gh22270_expected_output') - assert df.to_html(float_format='%.0f') == expected - - @pytest.mark.parametrize("render_links, file_name", [ - (True, 'render_links_true'), - (False, 'render_links_false'), - ]) - def test_to_html_render_links(self, render_links, file_name, datapath): - # GH 2679 - data = [ - [0, 'http://pandas.pydata.org/?q1=a&q2=b', 'pydata.org'], - [0, 'www.pydata.org', 'pydata.org'] - ] - df = DataFrame(data, columns=['foo', 'bar', None]) - - result = df.to_html(render_links=render_links) - expected = expected_html(datapath, file_name) - assert result == expected + +def test_to_html_with_index_names_false(): + # GH 16493 + df = DataFrame({"A": [1, 2]}, index=Index(['a', 'b'], + name='myindexname')) + result = df.to_html(index_names=False) + assert 'myindexname' not in result + + +def test_to_html_with_id(): + # GH 8496 + df = DataFrame({"A": [1, 2]}, index=Index(['a', 'b'], + name='myindexname')) + result = df.to_html(index_names=False, table_id="TEST_ID") + assert ' id="TEST_ID"' in result + + +@pytest.mark.parametrize('value,float_format,expected', [ + (0.19999, '%.3f', 'gh21625_expected_output'), + (100.0, '%.0f', 'gh22270_expected_output'), +]) +def test_to_html_float_format_no_fixed_width( + value, float_format, expected, datapath): + # GH 21625, GH 22270 + df = DataFrame({'x': [value]}) + expected = expected_html(datapath, expected) + result = df.to_html(float_format=float_format) + assert result == expected + + +@pytest.mark.parametrize("render_links,expected", [ + (True, 'render_links_true'), + (False, 'render_links_false'), +]) +def test_to_html_render_links(render_links, expected, datapath): + # GH 2679 + data = [ + [0, 'http://pandas.pydata.org/?q1=a&q2=b', 'pydata.org'], + [0, 'www.pydata.org', 'pydata.org'] + ] + df = DataFrame(data, columns=['foo', 'bar', None]) + + result = df.to_html(render_links=render_links) + expected = expected_html(datapath, expected) + assert result 
== expected
https://api.github.com/repos/pandas-dev/pandas/pulls/24609
2019-01-04T12:09:52Z
2019-01-04T13:55:12Z
2019-01-04T13:55:12Z
2019-01-04T17:41:48Z
REF: dispatch Series.quantile to DataFrame, remove ScalarBlock
diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index 7d6aa6a42efc2..7878613a8b1b1 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -5,8 +5,7 @@ make_block, # io.pytables, io.packers FloatBlock, IntBlock, ComplexBlock, BoolBlock, ObjectBlock, TimeDeltaBlock, DatetimeBlock, DatetimeTZBlock, - CategoricalBlock, ExtensionBlock, ScalarBlock, - Block) + CategoricalBlock, ExtensionBlock, Block) from .managers import ( # noqa:F401 BlockManager, SingleBlockManager, create_block_manager_from_arrays, create_block_manager_from_blocks, diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 384676ede15f2..f88114e1c9e20 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -222,12 +222,6 @@ def make_block(self, values, placement=None, ndim=None): return make_block(values, placement=placement, ndim=ndim) - def make_block_scalar(self, values): - """ - Create a ScalarBlock - """ - return ScalarBlock(values) - def make_block_same_class(self, values, placement=None, ndim=None, dtype=None): """ Wrap given values in a block of same type as self. """ @@ -1468,13 +1462,15 @@ def quantile(self, qs, interpolation='linear', axis=0): else: # create the array of na_values # 2d len(values) * len(qs) - result = np.repeat(np.array([self._na_value] * len(qs)), + result = np.repeat(np.array([self.fill_value] * len(qs)), len(values)).reshape(len(values), len(qs)) else: - mask = isna(self.values) + # asarray needed for Sparse, see GH#24600 + # TODO: Why self.values and not values? 
+ mask = np.asarray(isna(self.values)) result = nanpercentile(values, np.array(qs) * 100, - axis=axis, na_value=self._na_value, + axis=axis, na_value=self.fill_value, mask=mask, ndim=self.ndim, interpolation=interpolation) @@ -1490,8 +1486,6 @@ def quantile(self, qs, interpolation='linear', axis=0): ndim = getattr(result, 'ndim', None) or 0 result = self._try_coerce_result(result) - if lib.is_scalar(result): - return self.make_block_scalar(result) return make_block(result, placement=np.arange(len(result)), ndim=ndim) @@ -1534,29 +1528,6 @@ def _replace_coerce(self, to_replace, value, inplace=True, regex=False, return self -class ScalarBlock(Block): - """ - a scalar compat Block - """ - __slots__ = ['_mgr_locs', 'values', 'ndim'] - - def __init__(self, values): - self.ndim = 0 - self.mgr_locs = [0] - self.values = values - - @property - def dtype(self): - return type(self.values) - - @property - def shape(self): - return tuple([0]) - - def __len__(self): - return 0 - - class NonConsolidatableMixIn(object): """ hold methods for the nonconsolidatable blocks """ _can_consolidate = False @@ -2675,7 +2646,7 @@ def convert(self, *args, **kwargs): if args: raise NotImplementedError - by_item = True if 'by_item' not in kwargs else kwargs['by_item'] + by_item = kwargs.get('by_item', True) new_inputs = ['coerce', 'datetime', 'numeric', 'timedelta'] new_style = False diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 0ad0a994e8a95..ab033ff4c1c4b 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -425,6 +425,10 @@ def quantile(self, axis=0, consolidate=True, transposed=False, Block Manager (new object) """ + # Series dispatches to DataFrame for quantile, which allows us to + # simplify some of the code here and in the blocks + assert self.ndim >= 2 + if consolidate: self._consolidate_inplace() @@ -449,6 +453,7 @@ def get_axe(block, qs, axes): # note that some DatetimeTZ, Categorical are always ndim==1 
ndim = {b.ndim for b in blocks} + assert 0 not in ndim, ndim if 2 in ndim: @@ -474,15 +479,7 @@ def get_axe(block, qs, axes): return self.__class__(blocks, new_axes) - # 0 ndim - if 0 in ndim and 1 not in ndim: - values = np.array([b.values for b in blocks]) - if len(values) == 1: - return values.item() - blocks = [make_block(values, ndim=1)] - axes = Index([ax[0] for ax in axes]) - - # single block + # single block, i.e. ndim == {1} values = _concat._concat_compat([b.values for b in blocks]) # compute the orderings of our original data diff --git a/pandas/core/series.py b/pandas/core/series.py index 46ff04fdd31ae..de34227cda28a 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1987,15 +1987,23 @@ def quantile(self, q=0.5, interpolation='linear'): self._check_percentile(q) - result = self._data.quantile(qs=q, interpolation=interpolation) + # We dispatch to DataFrame so that core.internals only has to worry + # about 2D cases. + df = self.to_frame() + + result = df.quantile(q=q, interpolation=interpolation, + numeric_only=False) + if result.ndim == 2: + result = result.iloc[:, 0] if is_list_like(q): + result.name = self.name return self._constructor(result, index=Float64Index(q), name=self.name) else: # scalar - return result + return result.iloc[0] def corr(self, other, method='pearson', min_periods=None): """ diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py index 31199dc01b659..0efd48c25ad62 100644 --- a/pandas/tests/resample/test_base.py +++ b/pandas/tests/resample/test_base.py @@ -218,5 +218,5 @@ def test_resample_quantile_all_ts(series): q = 0.75 freq = 'H' result = s.resample(freq).quantile(q) - expected = s.resample(freq).agg(lambda x: x.quantile(q)) + expected = s.resample(freq).agg(lambda x: x.quantile(q)).rename(s.name) tm.assert_series_equal(result, expected)
ScalarBlock exists because sometimes Block.quantile needs to return a scalar. By having Series dispatch to DataFrame, we simplify quantile in internals an get to remove ScalarBlock and make_block_scalar
https://api.github.com/repos/pandas-dev/pandas/pulls/24606
2019-01-04T02:09:48Z
2019-01-04T12:11:57Z
2019-01-04T12:11:57Z
2019-01-04T16:20:05Z
24024 follow-up: fix incorrectly accepting iNaT in validate_fill_value
diff --git a/pandas/_libs/algos_common_helper.pxi.in b/pandas/_libs/algos_common_helper.pxi.in index 3708deb1a4b76..7d9ba420525c8 100644 --- a/pandas/_libs/algos_common_helper.pxi.in +++ b/pandas/_libs/algos_common_helper.pxi.in @@ -109,8 +109,6 @@ def ensure_object(object arr): return arr else: return arr.astype(np.object_) - elif hasattr(arr, '_box_values_as_index'): - return arr._box_values_as_index() else: return np.array(arr, dtype=np.object_) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 9f1491bd68684..a55e8759deedb 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -588,7 +588,7 @@ def astype(self, dtype, copy=True): @Appender(dtl.DatetimeLikeArrayMixin._validate_fill_value.__doc__) def _validate_fill_value(self, fill_value): - if isna(fill_value) or fill_value == iNaT: + if isna(fill_value): fill_value = iNaT elif isinstance(fill_value, (datetime, np.datetime64)): self._assert_tzawareness_compat(fill_value) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index b55bad46580fe..6696d6d4ca83e 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -5,7 +5,7 @@ import numpy as np from pandas._libs import lib, tslib, tslibs -from pandas._libs.tslibs import OutOfBoundsDatetime, Period, iNaT +from pandas._libs.tslibs import NaT, OutOfBoundsDatetime, Period, iNaT from pandas.compat import PY3, string_types, text_type, to_str from .common import ( @@ -272,7 +272,7 @@ def maybe_promote(dtype, fill_value=np.nan): fill_value = tslibs.Timedelta(fill_value).value elif is_datetime64tz_dtype(dtype): if isna(fill_value): - fill_value = iNaT + fill_value = NaT elif is_extension_array_dtype(dtype) and isna(fill_value): fill_value = dtype.na_value elif is_float(fill_value): diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index cfca5d1b7d2cc..082a314facdd6 100644 --- a/pandas/core/indexes/datetimelike.py +++ 
b/pandas/core/indexes/datetimelike.py @@ -203,15 +203,6 @@ def _ensure_localized(self, arg, ambiguous='raise', nonexistent='raise', return type(self)._simple_new(result, name=self.name) return arg - def _box_values_as_index(self): - """ - Return object Index which contains boxed values. - """ - # XXX: this is broken (not called) for PeriodIndex, which doesn't - # define _box_values AFAICT - from pandas.core.index import Index - return Index(self._box_values(self.asi8), name=self.name, dtype=object) - def _box_values(self, values): return self._data._box_values(values) diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index f76999a0dbc32..db88d94be1cab 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -388,6 +388,10 @@ def test_take_fill_valid(self, datetime_index, tz_naive_fixture): # Timestamp with mismatched tz-awareness arr.take([-1, 1], allow_fill=True, fill_value=now) + with pytest.raises(ValueError): + # require NaT, not iNaT, as it could be confused with an integer + arr.take([-1, 1], allow_fill=True, fill_value=pd.NaT.value) + def test_concat_same_type_invalid(self, datetime_index): # different timezones dti = datetime_index
remove box_values_as_index xref #23833, #23982 for overhaul of maybe_promote testing
https://api.github.com/repos/pandas-dev/pandas/pulls/24605
2019-01-04T00:34:26Z
2019-01-04T12:12:37Z
2019-01-04T12:12:37Z
2019-01-04T16:17:51Z
Fixed PeriodIndex._shallow_copy for i8
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index b59c32bb8a9d4..5e4dd2998a3be 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -322,13 +322,9 @@ def _shallow_copy(self, values=None, **kwargs): # this quite a bit. values = period_array(values, freq=self.freq) - # I don't like overloading shallow_copy with freq changes. - # See if it's used anywhere outside of test_resample_empty_dataframe + # We don't allow changing `freq` in _shallow_copy. + validate_dtype_freq(self.dtype, kwargs.get('freq')) attributes = self._get_attributes_dict() - freq = kwargs.pop("freq", None) - if freq: - values = values.asfreq(freq) - attributes.pop("freq", None) attributes.update(kwargs) if not len(values) and 'dtype' not in kwargs: diff --git a/pandas/core/resample.py b/pandas/core/resample.py index ee9137c264edc..25604b29f22f6 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -404,7 +404,10 @@ def _wrap_result(self, result): if isinstance(result, ABCSeries) and result.empty: obj = self.obj - result.index = obj.index._shallow_copy(freq=to_offset(self.freq)) + if isinstance(obj.index, PeriodIndex): + result.index = obj.index.asfreq(self.freq) + else: + result.index = obj.index._shallow_copy(freq=self.freq) result.name = getattr(obj, 'name', None) return result diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 53f28612305c2..464ff7aa5d58d 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -1,6 +1,7 @@ import numpy as np import pytest +from pandas._libs.tslibs.period import IncompatibleFrequency import pandas.util._test_decorators as td import pandas as pd @@ -40,9 +41,7 @@ def test_where(self): @pytest.mark.parametrize('use_numpy', [True, False]) @pytest.mark.parametrize('index', [ pd.period_range('2000-01-01', periods=3, freq='D'), - pytest.param( - pd.period_range('2001-01-01', periods=3, 
freq='2D'), - marks=pytest.mark.xfail(reason='GH 24391')), + pd.period_range('2001-01-01', periods=3, freq='2D'), pd.PeriodIndex(['2001-01', 'NaT', '2003-01'], freq='M')]) def test_repeat_freqstr(self, index, use_numpy): # GH10183 @@ -117,6 +116,17 @@ def test_shallow_copy_empty(self): tm.assert_index_equal(result, expected) + def test_shallow_copy_i8(self): + # GH-24391 + pi = period_range("2018-01-01", periods=3, freq="2D") + result = pi._shallow_copy(pi.asi8, freq=pi.freq) + tm.assert_index_equal(result, pi) + + def test_shallow_copy_changing_freq_raises(self): + pi = period_range("2018-01-01", periods=3, freq="2D") + with pytest.raises(IncompatibleFrequency, match="are different"): + pi._shallow_copy(pi, freq="H") + def test_dtype_str(self): pi = pd.PeriodIndex([], freq='M') assert pi.dtype_str == 'period[M]' diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py index 0efd48c25ad62..911cd990ab881 100644 --- a/pandas/tests/resample/test_base.py +++ b/pandas/tests/resample/test_base.py @@ -109,7 +109,10 @@ def test_resample_empty_series_all_ts(freq, empty_series, resample_method): result = getattr(s.resample(freq), resample_method)() expected = s.copy() - expected.index = s.index._shallow_copy(freq=freq) + if isinstance(s.index, PeriodIndex): + expected.index = s.index.asfreq(freq=freq) + else: + expected.index = s.index._shallow_copy(freq=freq) assert_index_equal(result.index, expected.index) assert result.index.freq == expected.index.freq assert_series_equal(result, expected, check_dtype=False) @@ -127,7 +130,10 @@ def test_resample_empty_dataframe_all_ts(empty_frame, freq, resample_method): # GH14962 expected = Series([]) - expected.index = df.index._shallow_copy(freq=freq) + if isinstance(df.index, PeriodIndex): + expected.index = df.index.asfreq(freq=freq) + else: + expected.index = df.index._shallow_copy(freq=freq) assert_index_equal(result.index, expected.index) assert result.index.freq == expected.index.freq 
assert_almost_equal(result, expected, check_dtype=False)
Closes https://github.com/pandas-dev/pandas/issues/24391 cc @jschendel
https://api.github.com/repos/pandas-dev/pandas/pulls/24604
2019-01-03T21:55:30Z
2019-01-04T17:57:42Z
2019-01-04T17:57:42Z
2019-01-04T19:53:58Z
DOC: Update doc description for day_opt in offsets
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 0ca9410df89c0..c2f51436612a4 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -903,9 +903,13 @@ cpdef int get_day_of_month(datetime other, day_opt) except? -1: Parameters ---------- other : datetime or Timestamp - day_opt : 'start', 'end' + day_opt : 'start', 'end', 'business_start', 'business_end', or int 'start': returns 1 'end': returns last day of the month + 'business_start': returns the first business day of the month + 'business_end': returns the last business day of the month + int: returns the day in the month indicated by `other`, or the last of + day the month if the value exceeds in that month's number of days. Returns ------- @@ -980,7 +984,7 @@ def roll_qtrday(other: datetime, n: int, month: int, other : datetime or Timestamp n : number of periods to increment, before adjusting for rolling month : int reference month giving the first month of the year - day_opt : 'start', 'end', 'business_start', 'business_end' + day_opt : 'start', 'end', 'business_start', 'business_end', or int The convention to use in finding the day in a given month against which to compare for rollforward/rollbackward decisions. modby : int 3 for quarters, 12 for years @@ -988,6 +992,10 @@ def roll_qtrday(other: datetime, n: int, month: int, Returns ------- n : int number of periods to increment + + See Also + -------- + get_day_of_month : Find the day in a month provided an offset. 
""" cdef: int months_since @@ -1022,9 +1030,16 @@ def roll_yearday(other: datetime, n: int, month: int, day_opt: object) -> int: other : datetime or Timestamp n : number of periods to increment, before adjusting for rolling month : reference month giving the first month of the year - day_opt : 'start', 'end' - 'start': returns 1 - 'end': returns last day of the month + day_opt : 'start', 'end', 'business_start', 'business_end', or int + The day of the month to compare against that of `other` when + incrementing or decrementing the number of periods: + + 'start': 1 + 'end': last day of the month + 'business_start': first business day of the month + 'business_end': last business day of the month + int: day in the month indicated by `other`, or the last of day + the month if the value exceeds in that month's number of days. Returns -------
Follow-up to #24585.
https://api.github.com/repos/pandas-dev/pandas/pulls/24602
2019-01-03T20:42:12Z
2019-01-03T21:46:01Z
2019-01-03T21:46:01Z
2019-01-03T21:47:00Z
Rename DatetimeArray and TimedeltaArray
diff --git a/pandas/arrays/__init__.py b/pandas/arrays/__init__.py index 5433d11eccff9..7d9b1b7c7a659 100644 --- a/pandas/arrays/__init__.py +++ b/pandas/arrays/__init__.py @@ -6,8 +6,8 @@ from pandas.core.arrays import ( IntervalArray, PeriodArray, Categorical, SparseArray, IntegerArray, PandasArray, - DatetimeArrayMixin as DatetimeArray, - TimedeltaArrayMixin as TimedeltaArray, + DatetimeArray, + TimedeltaArray, ) diff --git a/pandas/core/arrays/__init__.py b/pandas/core/arrays/__init__.py index d6a61a26a954f..1033ce784046e 100644 --- a/pandas/core/arrays/__init__.py +++ b/pandas/core/arrays/__init__.py @@ -3,10 +3,10 @@ ExtensionOpsMixin, ExtensionScalarOpsMixin) from .categorical import Categorical # noqa -from .datetimes import DatetimeArrayMixin # noqa +from .datetimes import DatetimeArray # noqa from .interval import IntervalArray # noqa from .period import PeriodArray, period_array # noqa -from .timedeltas import TimedeltaArrayMixin # noqa +from .timedeltas import TimedeltaArray # noqa from .integer import ( # noqa IntegerArray, integer_array) from .sparse import SparseArray # noqa diff --git a/pandas/core/arrays/array_.py b/pandas/core/arrays/array_.py index 4e84c62bce3d6..04842d82fca5d 100644 --- a/pandas/core/arrays/array_.py +++ b/pandas/core/arrays/array_.py @@ -184,8 +184,8 @@ def array(data, # type: Sequence[object] """ from pandas.core.arrays import ( period_array, ExtensionArray, IntervalArray, PandasArray, - DatetimeArrayMixin, - TimedeltaArrayMixin, + DatetimeArray, + TimedeltaArray, ) from pandas.core.internals.arrays import extract_array @@ -228,14 +228,14 @@ def array(data, # type: Sequence[object] elif inferred_dtype.startswith('datetime'): # datetime, datetime64 try: - return DatetimeArrayMixin._from_sequence(data, copy=copy) + return DatetimeArray._from_sequence(data, copy=copy) except ValueError: # Mixture of timezones, fall back to PandasArray pass elif inferred_dtype.startswith('timedelta'): # timedelta, timedelta64 - return 
TimedeltaArrayMixin._from_sequence(data, copy=copy) + return TimedeltaArray._from_sequence(data, copy=copy) # TODO(BooleanArray): handle this type diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 8b5445bedd46c..65f9bb14158bb 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1228,9 +1228,9 @@ def __add__(self, other): return NotImplemented if is_timedelta64_dtype(result) and isinstance(result, np.ndarray): - from pandas.core.arrays import TimedeltaArrayMixin + from pandas.core.arrays import TimedeltaArray # TODO: infer freq? - return TimedeltaArrayMixin(result) + return TimedeltaArray(result) return result def __radd__(self, other): @@ -1295,9 +1295,9 @@ def __sub__(self, other): return NotImplemented if is_timedelta64_dtype(result) and isinstance(result, np.ndarray): - from pandas.core.arrays import TimedeltaArrayMixin + from pandas.core.arrays import TimedeltaArray # TODO: infer freq? - return TimedeltaArrayMixin(result) + return TimedeltaArray(result) return result def __rsub__(self, other): @@ -1306,8 +1306,8 @@ def __rsub__(self, other): # we need to wrap in DatetimeArray/Index and flip the operation if not isinstance(other, DatetimeLikeArrayMixin): # Avoid down-casting DatetimeIndex - from pandas.core.arrays import DatetimeArrayMixin - other = DatetimeArrayMixin(other) + from pandas.core.arrays import DatetimeArray + other = DatetimeArray(other) return other - self elif (is_datetime64_any_dtype(self) and hasattr(other, 'dtype') and not is_datetime64_any_dtype(other)): diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index c428fd2e75e08..520121710cbd4 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -127,7 +127,7 @@ def wrapper(self, other): except ValueError: other = np.array(other, dtype=np.object_) elif not isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries, - DatetimeArrayMixin)): + DatetimeArray)): # 
Following Timestamp convention, __eq__ is all-False # and __ne__ is all True, others raise TypeError. return ops.invalid_comparison(self, other, op) @@ -176,9 +176,9 @@ def wrapper(self, other): return compat.set_function_name(wrapper, opname, cls) -class DatetimeArrayMixin(dtl.DatetimeLikeArrayMixin, - dtl.TimelikeOps, - dtl.DatelikeOps): +class DatetimeArray(dtl.DatetimeLikeArrayMixin, + dtl.TimelikeOps, + dtl.DatelikeOps): """ Pandas ExtensionArray for tz-naive or tz-aware datetime data. @@ -718,7 +718,7 @@ def _add_delta(self, delta): ------- result : DatetimeArray """ - new_values = super(DatetimeArrayMixin, self)._add_delta(delta) + new_values = super(DatetimeArray, self)._add_delta(delta) return type(self)._from_sequence(new_values, tz=self.tz, freq='infer') # ----------------------------------------------------------------- @@ -1135,10 +1135,10 @@ def to_perioddelta(self, freq): TimedeltaArray/Index """ # TODO: consider privatizing (discussion in GH#23113) - from pandas.core.arrays.timedeltas import TimedeltaArrayMixin + from pandas.core.arrays.timedeltas import TimedeltaArray i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8 m8delta = i8delta.view('m8[ns]') - return TimedeltaArrayMixin(m8delta) + return TimedeltaArray(m8delta) # ----------------------------------------------------------------- # Properties - Vectorized Timestamp Properties/Methods @@ -1610,7 +1610,7 @@ def to_julian_date(self): ) / 24.0) -DatetimeArrayMixin._add_comparison_ops() +DatetimeArray._add_comparison_ops() # ------------------------------------------------------------------- diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 70da02f2ba0a1..0eeb3f718734a 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -327,7 +327,7 @@ def to_timestamp(self, freq=None, how='start'): ------- DatetimeArray/Index """ - from pandas.core.arrays import DatetimeArrayMixin + from pandas.core.arrays import DatetimeArray how = 
libperiod._validate_end_alias(how) @@ -351,7 +351,7 @@ def to_timestamp(self, freq=None, how='start'): new_data = self.asfreq(freq, how=how) new_data = libperiod.periodarr_to_dt64arr(new_data.asi8, base) - return DatetimeArrayMixin._from_sequence(new_data, freq='infer') + return DatetimeArray._from_sequence(new_data, freq='infer') # -------------------------------------------------------------------- # Array-like / EA-Interface Methods diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 3677d041886b3..0ccf82ebf7edd 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -102,7 +102,7 @@ def wrapper(self, other): return compat.set_function_name(wrapper, opname, cls) -class TimedeltaArrayMixin(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps): +class TimedeltaArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps): _typ = "timedeltaarray" _scalar_type = Timedelta __array_priority__ = 1000 @@ -348,7 +348,7 @@ def _add_delta(self, delta): ------- result : TimedeltaArray """ - new_values = super(TimedeltaArrayMixin, self)._add_delta(delta) + new_values = super(TimedeltaArray, self)._add_delta(delta) return type(self)._from_sequence(new_values, freq='infer') def _add_datetime_arraylike(self, other): @@ -357,15 +357,15 @@ def _add_datetime_arraylike(self, other): """ if isinstance(other, np.ndarray): # At this point we have already checked that dtype is datetime64 - from pandas.core.arrays import DatetimeArrayMixin - other = DatetimeArrayMixin(other) + from pandas.core.arrays import DatetimeArray + other = DatetimeArray(other) # defer to implementation in DatetimeArray return other + self def _add_datetimelike_scalar(self, other): # adding a timedeltaindex to a datetimelike - from pandas.core.arrays import DatetimeArrayMixin + from pandas.core.arrays import DatetimeArray assert other is not NaT other = Timestamp(other) @@ -373,14 +373,14 @@ def _add_datetimelike_scalar(self, other): # In this case we specifically 
interpret NaT as a datetime, not # the timedelta interpretation we would get by returning self + NaT result = self.asi8.view('m8[ms]') + NaT.to_datetime64() - return DatetimeArrayMixin(result) + return DatetimeArray(result) i8 = self.asi8 result = checked_add_with_arr(i8, other.value, arr_mask=self._isnan) result = self._maybe_mask_results(result) dtype = DatetimeTZDtype(tz=other.tz) if other.tz else _NS_DTYPE - return DatetimeArrayMixin(result, dtype=dtype, freq=self.freq) + return DatetimeArray(result, dtype=dtype, freq=self.freq) def _addsub_offset_array(self, other, op): # Add or subtract Array-like of DateOffset objects @@ -388,7 +388,7 @@ def _addsub_offset_array(self, other, op): # TimedeltaIndex can only operate with a subset of DateOffset # subclasses. Incompatible classes will raise AttributeError, # which we re-raise as TypeError - return super(TimedeltaArrayMixin, self)._addsub_offset_array( + return super(TimedeltaArray, self)._addsub_offset_array( other, op ) except AttributeError: @@ -813,7 +813,7 @@ def f(x): return result -TimedeltaArrayMixin._add_comparison_ops() +TimedeltaArray._add_comparison_ops() # --------------------------------------------------------------------- @@ -860,7 +860,7 @@ def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"): data = np.array(data, copy=False) elif isinstance(data, ABCSeries): data = data._values - elif isinstance(data, (ABCTimedeltaIndex, TimedeltaArrayMixin)): + elif isinstance(data, (ABCTimedeltaIndex, TimedeltaArray)): inferred_freq = data.freq data = data._data diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 9e2564c4f825b..ac69927d4adf1 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -572,8 +572,8 @@ def construct_array_type(cls): ------- type """ - from pandas.core.arrays import DatetimeArrayMixin - return DatetimeArrayMixin + from pandas.core.arrays import DatetimeArray + return DatetimeArray @classmethod def 
construct_from_string(cls, string): diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index 842fcd0680467..c43469d3c3a81 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -11,9 +11,7 @@ from pandas.core.accessor import PandasDelegate, delegate_names from pandas.core.algorithms import take_1d -from pandas.core.arrays import ( - DatetimeArrayMixin as DatetimeArray, PeriodArray, - TimedeltaArrayMixin as TimedeltaArray) +from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray from pandas.core.base import NoNewAttributesMixin, PandasObject from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 7d901f4656731..f396f081267b3 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -22,7 +22,7 @@ from pandas.core.accessor import delegate_names from pandas.core.arrays.datetimes import ( - DatetimeArrayMixin as DatetimeArray, _to_M8, validate_tz_from_dtype) + DatetimeArray, _to_M8, validate_tz_from_dtype) from pandas.core.base import _shared_docs import pandas.core.common as com from pandas.core.indexes.base import Index diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 5e8e6a423ab3f..9301638d4f632 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -17,8 +17,7 @@ from pandas.core.accessor import delegate_names from pandas.core.arrays import datetimelike as dtl -from pandas.core.arrays.timedeltas import ( - TimedeltaArrayMixin as TimedeltaArray, _is_convertible_to_td) +from pandas.core.arrays.timedeltas import TimedeltaArray, _is_convertible_to_td from pandas.core.base import _shared_docs import pandas.core.common as com from pandas.core.indexes.base import Index, _index_shared_docs diff --git a/pandas/core/internals/blocks.py 
b/pandas/core/internals/blocks.py index 3b2c13af785d4..bd16495e472b1 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -35,8 +35,7 @@ import pandas.core.algorithms as algos from pandas.core.arrays import ( - Categorical, DatetimeArrayMixin as DatetimeArray, ExtensionArray, - TimedeltaArrayMixin as TimedeltaArray) + Categorical, DatetimeArray, ExtensionArray, TimedeltaArray) from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.indexes.datetimes import DatetimeIndex diff --git a/pandas/core/series.py b/pandas/core/series.py index 52b60339a7d68..46ff04fdd31ae 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1586,7 +1586,7 @@ def unique(self): >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() - <DatetimeArrayMixin> + <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 69d735d7fdc65..5b540ee88a3f3 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -171,7 +171,7 @@ def _convert_listlike_datetimes(arg, box, format, name=None, tz=None, - ndarray of Timestamps if box=False """ from pandas import DatetimeIndex - from pandas.core.arrays import DatetimeArrayMixin as DatetimeArray + from pandas.core.arrays import DatetimeArray from pandas.core.arrays.datetimes import ( maybe_convert_dtype, objects_to_datetime64ns) diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index f5c4808a09123..f20d9a54e9da3 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -1981,7 +1981,7 @@ def test_dti_sub_tdi(self, tz_naive_fixture): result = dti - tdi tm.assert_index_equal(result, expected) - msg = 'cannot subtract .*TimedeltaArrayMixin' + msg = 'cannot subtract .*TimedeltaArray' with pytest.raises(TypeError, 
match=msg): tdi - dti @@ -1989,7 +1989,7 @@ def test_dti_sub_tdi(self, tz_naive_fixture): result = dti - tdi.values tm.assert_index_equal(result, expected) - msg = 'cannot subtract DatetimeArrayMixin from' + msg = 'cannot subtract DatetimeArray from' with pytest.raises(TypeError, match=msg): tdi.values - dti @@ -2005,7 +2005,7 @@ def test_dti_isub_tdi(self, tz_naive_fixture): result -= tdi tm.assert_index_equal(result, expected) - msg = 'cannot subtract .* from a TimedeltaArrayMixin' + msg = 'cannot subtract .* from a TimedeltaArray' with pytest.raises(TypeError, match=msg): tdi -= dti @@ -2016,7 +2016,7 @@ def test_dti_isub_tdi(self, tz_naive_fixture): msg = '|'.join(['cannot perform __neg__ with this index type:', 'ufunc subtract cannot use operands with types', - 'cannot subtract DatetimeArrayMixin from']) + 'cannot subtract DatetimeArray from']) with pytest.raises(TypeError, match=msg): tdi.values -= dti @@ -2036,9 +2036,9 @@ def test_dti_isub_tdi(self, tz_naive_fixture): def test_add_datetimelike_and_dti(self, addend, tz): # GH#9631 dti = DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize(tz) - msg = ('cannot add DatetimeArrayMixin and {0}' + msg = ('cannot add DatetimeArray and {0}' .format(type(addend).__name__)).replace('DatetimeIndex', - 'DatetimeArrayMixin') + 'DatetimeArray') with pytest.raises(TypeError, match=msg): dti + addend with pytest.raises(TypeError, match=msg): diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 348ac4579ffb5..f76999a0dbc32 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -5,9 +5,7 @@ import pandas.compat as compat import pandas as pd -from pandas.core.arrays import ( - DatetimeArrayMixin as DatetimeArray, PeriodArray, - TimedeltaArrayMixin as TimedeltaArray) +from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray import pandas.util.testing as tm diff --git a/pandas/tests/arrays/test_datetimes.py 
b/pandas/tests/arrays/test_datetimes.py index 1375969c961fd..8890593b1fa9d 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -10,7 +10,7 @@ from pandas.core.dtypes.dtypes import DatetimeTZDtype import pandas as pd -from pandas.core.arrays import DatetimeArrayMixin as DatetimeArray +from pandas.core.arrays import DatetimeArray from pandas.core.arrays.datetimes import sequence_to_dt64ns import pandas.util.testing as tm diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py index 08ef27297cca5..481350640e1a6 100644 --- a/pandas/tests/arrays/test_timedeltas.py +++ b/pandas/tests/arrays/test_timedeltas.py @@ -4,7 +4,7 @@ import pytest import pandas as pd -from pandas.core.arrays import TimedeltaArrayMixin as TimedeltaArray +from pandas.core.arrays import TimedeltaArray import pandas.util.testing as tm diff --git a/pandas/tests/dtypes/test_generic.py b/pandas/tests/dtypes/test_generic.py index 96f92fccc5a71..1622088d05f4d 100644 --- a/pandas/tests/dtypes/test_generic.py +++ b/pandas/tests/dtypes/test_generic.py @@ -22,8 +22,8 @@ class TestABCClasses(object): sparse_series = pd.Series([1, 2, 3]).to_sparse() sparse_array = pd.SparseArray(np.random.randn(10)) sparse_frame = pd.SparseDataFrame({'a': [1, -1, None]}) - datetime_array = pd.core.arrays.DatetimeArrayMixin(datetime_index) - timedelta_array = pd.core.arrays.TimedeltaArrayMixin(timedelta_index) + datetime_array = pd.core.arrays.DatetimeArray(datetime_index) + timedelta_array = pd.core.arrays.TimedeltaArray(timedelta_index) def test_abc_types(self): assert isinstance(pd.Index(['a', 'b', 'c']), gt.ABCIndex) diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py index 7c4491d6edbcf..00ad35bf6a924 100644 --- a/pandas/tests/extension/test_datetime.py +++ b/pandas/tests/extension/test_datetime.py @@ -4,7 +4,7 @@ from pandas.core.dtypes.dtypes import DatetimeTZDtype import pandas as pd -from 
pandas.core.arrays import DatetimeArrayMixin as DatetimeArray +from pandas.core.arrays import DatetimeArray from pandas.tests.extension import base @@ -129,7 +129,7 @@ def test_arith_series_with_scalar(self, data, all_arithmetic_operators): def test_add_series_with_extension_array(self, data): # Datetime + Datetime not implemented s = pd.Series(data) - msg = 'cannot add DatetimeArray(Mixin)? and DatetimeArray(Mixin)?' + msg = 'cannot add DatetimeArray and DatetimeArray' with pytest.raises(TypeError, match=msg): s + data diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py index cda7a005c40c7..562be4cf85864 100644 --- a/pandas/tests/indexes/datetimes/test_astype.py +++ b/pandas/tests/indexes/datetimes/test_astype.py @@ -177,7 +177,7 @@ def test_astype_object_with_nat(self): def test_astype_raises(self, dtype): # GH 13149, GH 13209 idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN]) - msg = 'Cannot cast DatetimeArrayMixin to dtype' + msg = 'Cannot cast DatetimeArray to dtype' with pytest.raises(TypeError, match=msg): idx.astype(dtype) diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py index bca99d27bda56..97de4cd98dedf 100644 --- a/pandas/tests/indexes/datetimes/test_construction.py +++ b/pandas/tests/indexes/datetimes/test_construction.py @@ -13,8 +13,7 @@ from pandas import ( DatetimeIndex, Index, Timestamp, date_range, datetime, offsets, to_datetime) -from pandas.core.arrays import ( - DatetimeArrayMixin as DatetimeArray, period_array) +from pandas.core.arrays import DatetimeArray, period_array import pandas.util.testing as tm diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index deb1850a8b483..50c8f8d4c1f4c 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -24,7 +24,7 @@ from pandas import ( DataFrame, DatetimeIndex, 
Index, NaT, Series, Timestamp, compat, date_range, isna, to_datetime) -from pandas.core.arrays import DatetimeArrayMixin as DatetimeArray +from pandas.core.arrays import DatetimeArray from pandas.core.tools import datetimes as tools from pandas.util import testing as tm from pandas.util.testing import assert_series_equal diff --git a/pandas/tests/indexes/timedeltas/test_astype.py b/pandas/tests/indexes/timedeltas/test_astype.py index ae0dbf24f048e..3f5507612c8e6 100644 --- a/pandas/tests/indexes/timedeltas/test_astype.py +++ b/pandas/tests/indexes/timedeltas/test_astype.py @@ -83,7 +83,7 @@ def test_astype_timedelta64(self): def test_astype_raises(self, dtype): # GH 13149, GH 13209 idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN]) - msg = 'Cannot cast TimedeltaArrayMixin to dtype' + msg = 'Cannot cast TimedeltaArray to dtype' with pytest.raises(TypeError, match=msg): idx.astype(dtype) diff --git a/pandas/tests/indexes/timedeltas/test_construction.py b/pandas/tests/indexes/timedeltas/test_construction.py index b9bbfaff06215..76f79e86e6f11 100644 --- a/pandas/tests/indexes/timedeltas/test_construction.py +++ b/pandas/tests/indexes/timedeltas/test_construction.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import Timedelta, TimedeltaIndex, timedelta_range, to_timedelta -from pandas.core.arrays import TimedeltaArrayMixin as TimedeltaArray +from pandas.core.arrays import TimedeltaArray import pandas.util.testing as tm diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index b9196971d2e53..7147761d23caa 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -15,8 +15,8 @@ from pandas.compat import OrderedDict, lrange from pandas.core.arrays import ( - DatetimeArrayMixin as DatetimeArray, - TimedeltaArrayMixin as TimedeltaArray, + DatetimeArray, + TimedeltaArray, ) from pandas.core.internals import (SingleBlockManager, make_block, BlockManager) diff --git 
a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 294eae9d45bee..42e9b1f5af8ad 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -22,7 +22,7 @@ Categorical, CategoricalIndex, DatetimeIndex, Index, IntervalIndex, Series, Timestamp, compat) import pandas.core.algorithms as algos -from pandas.core.arrays import DatetimeArrayMixin as DatetimeArray +from pandas.core.arrays import DatetimeArray import pandas.core.common as com import pandas.util.testing as tm from pandas.util.testing import assert_almost_equal diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index f60d73ea1b05b..657f5f193c85e 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -23,9 +23,7 @@ CategoricalIndex, DataFrame, DatetimeIndex, Index, Interval, IntervalIndex, Panel, PeriodIndex, Series, Timedelta, TimedeltaIndex, Timestamp) from pandas.core.accessor import PandasDelegate -from pandas.core.arrays import ( - DatetimeArrayMixin as DatetimeArray, PandasArray, - TimedeltaArrayMixin as TimedeltaArray) +from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray from pandas.core.base import NoNewAttributesMixin, PandasObject from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin import pandas.util.testing as tm diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index cc793767d3af6..1e65118194be7 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -14,7 +14,7 @@ import pandas as pd from pandas import Series, isna -from pandas.core.arrays import DatetimeArrayMixin as DatetimeArray +from pandas.core.arrays import DatetimeArray import pandas.core.nanops as nanops import pandas.util.testing as tm diff --git a/pandas/util/testing.py b/pandas/util/testing.py index ebdfde2da24f8..2df43cd678764 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -36,8 +36,8 @@ IntervalIndex, MultiIndex, Panel, RangeIndex, Series, bdate_range) from 
pandas.core.algorithms import take_1d from pandas.core.arrays import ( - DatetimeArrayMixin as DatetimeArray, ExtensionArray, IntervalArray, - PeriodArray, TimedeltaArrayMixin as TimedeltaArray, period_array) + DatetimeArray, ExtensionArray, IntervalArray, PeriodArray, TimedeltaArray, + period_array) import pandas.core.common as com from pandas.io.common import urlopen
Closes #24231
https://api.github.com/repos/pandas-dev/pandas/pulls/24601
2019-01-03T20:24:28Z
2019-01-03T21:06:25Z
2019-01-03T21:06:25Z
2019-01-03T21:13:43Z
Fixed construction / factorization of empty PA and IA
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 0e3c59120415d..2e7216108a23e 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -217,6 +217,11 @@ def _from_sequence(cls, scalars, dtype=None, copy=False): @classmethod def _from_factorized(cls, values, original): + if len(values) == 0: + # An empty array returns object-dtype here. We can't create + # a new IA from an (empty) object-dtype array, so turn it into the + # correct dtype. + values = values.astype(original.dtype.subtype) return cls(values, closed=original.closed) _interval_shared_docs['from_breaks'] = """ diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 70da02f2ba0a1..6e3dc6f789cc9 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -189,6 +189,13 @@ def _from_sequence(cls, scalars, dtype=None, copy=False): freq = dtype.freq else: freq = None + + if isinstance(scalars, cls): + validate_dtype_freq(scalars.dtype, freq) + if copy: + scalars = scalars.copy() + return scalars + periods = np.asarray(scalars, dtype=object) if copy: periods = periods.copy() diff --git a/pandas/tests/arrays/test_period.py b/pandas/tests/arrays/test_period.py index 387eaa5223bbe..affe3b3854490 100644 --- a/pandas/tests/arrays/test_period.py +++ b/pandas/tests/arrays/test_period.py @@ -225,8 +225,7 @@ def test_sub_period(): def test_where_different_freq_raises(other): ser = pd.Series(period_array(['2000', '2001', '2002'], freq='D')) cond = np.array([True, False, True]) - with pytest.raises(IncompatibleFrequency, - match="Input has different freq=H"): + with pytest.raises(IncompatibleFrequency, match="freq"): ser.where(cond, other) diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py index f259e66e6cc76..2ace0fadc73e9 100644 --- a/pandas/tests/extension/arrow/test_bool.py +++ b/pandas/tests/extension/arrow/test_bool.py @@ -44,6 +44,11 @@ class 
TestConstructors(BaseArrowTests, base.BaseConstructorsTests): def test_from_dtype(self, data): pytest.skip("GH-22666") + # seems like some bug in isna on empty BoolArray returning floats. + @pytest.mark.xfail(reason='bad is-na for empty data') + def test_from_sequence_from_cls(self, data): + super(TestConstructors, self).test_from_sequence_from_cls(data) + class TestReduce(base.BaseNoReduceTests): def test_reduce_series_boolean(self): diff --git a/pandas/tests/extension/base/constructors.py b/pandas/tests/extension/base/constructors.py index 9c719b1304629..231a1f648f8e8 100644 --- a/pandas/tests/extension/base/constructors.py +++ b/pandas/tests/extension/base/constructors.py @@ -9,6 +9,14 @@ class BaseConstructorsTests(BaseExtensionTests): + def test_from_sequence_from_cls(self, data): + result = type(data)._from_sequence(data, dtype=data.dtype) + self.assert_extension_array_equal(result, data) + + data = data[:0] + result = type(data)._from_sequence(data, dtype=data.dtype) + self.assert_extension_array_equal(result, data) + def test_array_from_scalars(self, data): scalars = [data[0], data[1], data[2]] result = data._from_sequence(scalars) diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py index 2c04c4cd99801..f64df7a84b7c0 100644 --- a/pandas/tests/extension/base/methods.py +++ b/pandas/tests/extension/base/methods.py @@ -105,6 +105,14 @@ def test_factorize_equivalence(self, data_for_grouping, na_sentinel): tm.assert_numpy_array_equal(l1, l2) self.assert_extension_array_equal(u1, u2) + def test_factorize_empty(self, data): + labels, uniques = pd.factorize(data[:0]) + expected_labels = np.array([], dtype=np.intp) + expected_uniques = type(data)._from_sequence([], dtype=data[:0].dtype) + + tm.assert_numpy_array_equal(labels, expected_labels) + self.assert_extension_array_equal(uniques, expected_uniques) + def test_fillna_copy_frame(self, data_missing): arr = data_missing.take([1, 1]) df = pd.DataFrame({"A": arr}) diff --git 
a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index bd50584406312..10fd21f89c564 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -179,6 +179,9 @@ def _concat_same_type(cls, to_concat): def _values_for_factorize(self): frozen = self._values_for_argsort() + if len(frozen) == 0: + # _factorize_array expects 1-d array, this is a len-0 2-d array. + frozen = frozen.ravel() return frozen, () def _values_for_argsort(self):
Closes https://github.com/pandas-dev/pandas/issues/23933
https://api.github.com/repos/pandas-dev/pandas/pulls/24599
2019-01-03T19:17:18Z
2019-01-03T22:04:56Z
2019-01-03T22:04:55Z
2019-01-03T22:04:58Z
DOC: Remove flake8 errors for basics.rst and contributing_docstring.rst
diff --git a/doc/source/contributing_docstring.rst b/doc/source/contributing_docstring.rst index 7c7847a47a1a2..f7e2b42a1ccbd 100644 --- a/doc/source/contributing_docstring.rst +++ b/doc/source/contributing_docstring.rst @@ -457,12 +457,14 @@ For example, with a single value: float Random number generated. """ - return random.random() + return np.random.random() With more than one value: .. code-block:: python + import string + def random_letters(): """ Generate and return a sequence of random letters. @@ -477,8 +479,8 @@ With more than one value: letters : str String of random letters. """ - length = random.randint(1, 10) - letters = ''.join(random.choice(string.ascii_lowercase) + length = np.random.randint(1, 10) + letters = ''.join(np.random.choice(string.ascii_lowercase) for i in range(length)) return length, letters @@ -499,7 +501,7 @@ If the method yields its value: Random number generated. """ while True: - yield random.random() + yield np.random.random() .. _docstring.see_also: @@ -686,8 +688,8 @@ shown: .. code-block:: python - import numpy as np # noqa: F401 - import pandas as pd # noqa: F401 + import numpy as np + import pandas as pd Any other module used in the examples must be explicitly imported, one per line (as recommended in :pep:`8#imports`) @@ -776,7 +778,7 @@ positional arguments ``head(3)``. Examples -------- - >>> s = pd.Series('Antelope', 'Lion', 'Zebra', numpy.nan) + >>> s = pd.Series('Antelope', 'Lion', 'Zebra', np.nan) >>> s.contains(pattern='a') 0 False 1 False @@ -834,7 +836,7 @@ positional arguments ``head(3)``. -------- >>> import numpy as np >>> import pandas as pd - >>> df = pd.DataFrame(numpy.random.randn(3, 3), + >>> df = pd.DataFrame(np.random.randn(3, 3), ... columns=('a', 'b', 'c')) >>> df.method(1) 21
xref: #24173
https://api.github.com/repos/pandas-dev/pandas/pulls/24598
2019-01-03T18:03:57Z
2019-01-04T12:22:10Z
2019-01-04T12:22:10Z
2019-08-26T14:12:16Z
REF: Simplify quantile, remove reduction from BlockManager
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index bd16495e472b1..384676ede15f2 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -42,6 +42,7 @@ from pandas.core.indexing import check_setitem_lengths from pandas.core.internals.arrays import extract_array import pandas.core.missing as missing +from pandas.core.nanops import nanpercentile from pandas.io.formats.printing import pprint_thing @@ -1438,7 +1439,7 @@ def _unstack(self, unstacker_func, new_columns, n_rows, fill_value): blocks = [make_block(new_values, placement=new_placement)] return blocks, mask - def quantile(self, qs, interpolation='linear', axis=0, axes=None): + def quantile(self, qs, interpolation='linear', axis=0): """ compute the quantiles of the @@ -1447,94 +1448,53 @@ def quantile(self, qs, interpolation='linear', axis=0, axes=None): qs: a scalar or list of the quantiles to be computed interpolation: type of interpolation, default 'linear' axis: axis to compute, default 0 - axes : BlockManager.axes Returns ------- - tuple of (axis, block) - + Block """ - kw = {'interpolation': interpolation} values = self.get_values() values, _ = self._try_coerce_args(values, values) - def _nanpercentile1D(values, mask, q, **kw): - # mask is Union[ExtensionArray, ndarray] - values = values[~mask] - - if len(values) == 0: - if lib.is_scalar(q): - return self._na_value - else: - return np.array([self._na_value] * len(q), - dtype=values.dtype) - - return np.percentile(values, q, **kw) - - def _nanpercentile(values, q, axis, **kw): - - mask = isna(self.values) - if not lib.is_scalar(mask) and mask.any(): - if self.ndim == 1: - return _nanpercentile1D(values, mask, q, **kw) - else: - # for nonconsolidatable blocks mask is 1D, but values 2D - if mask.ndim < values.ndim: - mask = mask.reshape(values.shape) - if axis == 0: - values = values.T - mask = mask.T - result = [_nanpercentile1D(val, m, q, **kw) for (val, m) - in zip(list(values), list(mask))] - 
result = np.array(result, dtype=values.dtype, copy=False).T - return result - else: - return np.percentile(values, q, axis=axis, **kw) - - from pandas import Float64Index is_empty = values.shape[axis] == 0 - if is_list_like(qs): - ax = Float64Index(qs) + orig_scalar = not is_list_like(qs) + if orig_scalar: + # make list-like, unpack later + qs = [qs] - if is_empty: - if self.ndim == 1: - result = self._na_value - else: - # create the array of na_values - # 2d len(values) * len(qs) - result = np.repeat(np.array([self._na_value] * len(qs)), - len(values)).reshape(len(values), - len(qs)) + if is_empty: + if self.ndim == 1: + result = self._na_value else: - result = _nanpercentile(values, np.array(qs) * 100, - axis=axis, **kw) - - result = np.array(result, copy=False) - if self.ndim > 1: - result = result.T - + # create the array of na_values + # 2d len(values) * len(qs) + result = np.repeat(np.array([self._na_value] * len(qs)), + len(values)).reshape(len(values), + len(qs)) else: + mask = isna(self.values) + result = nanpercentile(values, np.array(qs) * 100, + axis=axis, na_value=self._na_value, + mask=mask, ndim=self.ndim, + interpolation=interpolation) - if self.ndim == 1: - ax = Float64Index([qs]) - else: - ax = axes[0] + result = np.array(result, copy=False) + if self.ndim > 1: + result = result.T - if is_empty: - if self.ndim == 1: - result = self._na_value - else: - result = np.array([self._na_value] * len(self)) - else: - result = _nanpercentile(values, qs * 100, axis=axis, **kw) + if orig_scalar and not lib.is_scalar(result): + # result could be scalar in case with is_empty and self.ndim == 1 + assert result.shape[-1] == 1, result.shape + result = result[..., 0] + result = lib.item_from_zerodim(result) ndim = getattr(result, 'ndim', None) or 0 result = self._try_coerce_result(result) if lib.is_scalar(result): - return ax, self.make_block_scalar(result) - return ax, make_block(result, - placement=np.arange(len(result)), - ndim=ndim) + return 
self.make_block_scalar(result) + return make_block(result, + placement=np.arange(len(result)), + ndim=ndim) def _replace_coerce(self, to_replace, value, inplace=True, regex=False, convert=False, mask=None): diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index eba49d18431ef..0ad0a994e8a95 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -16,7 +16,7 @@ maybe_promote) from pandas.core.dtypes.common import ( _NS_DTYPE, is_datetimelike_v_numeric, is_extension_array_dtype, - is_extension_type, is_numeric_v_string_like, is_scalar) + is_extension_type, is_list_like, is_numeric_v_string_like, is_scalar) import pandas.core.dtypes.concat as _concat from pandas.core.dtypes.generic import ABCExtensionArray, ABCSeries from pandas.core.dtypes.missing import isna @@ -402,34 +402,47 @@ def apply(self, f, axes=None, filter=None, do_integrity_check=False, bm._consolidate_inplace() return bm - def reduction(self, f, axis=0, consolidate=True, transposed=False, - **kwargs): + def quantile(self, axis=0, consolidate=True, transposed=False, + interpolation='linear', qs=None, numeric_only=None): """ - iterate over the blocks, collect and create a new block manager. + Iterate over blocks applying quantile reduction. This routine is intended for reduction type operations and will do inference on the generated blocks. Parameters ---------- - f: the callable or function name to operate on at the block level axis: reduction axis, default 0 consolidate: boolean, default True. 
Join together blocks having same dtype transposed: boolean, default False we are holding transposed data + interpolation : type of interpolation, default 'linear' + qs : a scalar or list of the quantiles to be computed + numeric_only : ignored Returns ------- Block Manager (new object) - """ if consolidate: self._consolidate_inplace() + def get_axe(block, qs, axes): + from pandas import Float64Index + if is_list_like(qs): + ax = Float64Index(qs) + elif block.ndim == 1: + ax = Float64Index([qs]) + else: + ax = axes[0] + return ax + axes, blocks = [], [] for b in self.blocks: - axe, block = getattr(b, f)(axis=axis, axes=self.axes, **kwargs) + block = b.quantile(axis=axis, qs=qs, interpolation=interpolation) + + axe = get_axe(b, qs, axes=self.axes) axes.append(axe) blocks.append(block) @@ -496,9 +509,6 @@ def isna(self, func, **kwargs): def where(self, **kwargs): return self.apply('where', **kwargs) - def quantile(self, **kwargs): - return self.reduction('quantile', **kwargs) - def setitem(self, **kwargs): return self.apply('setitem', **kwargs) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index f95c133163ddb..89e191f171f97 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -1194,3 +1194,75 @@ def f(x, y): nanle = make_nancomp(operator.le) naneq = make_nancomp(operator.eq) nanne = make_nancomp(operator.ne) + + +def _nanpercentile_1d(values, mask, q, na_value, interpolation): + """ + Wraper for np.percentile that skips missing values, specialized to + 1-dimensional case. 
+ + Parameters + ---------- + values : array over which to find quantiles + mask : ndarray[bool] + locations in values that should be considered missing + q : scalar or array of quantile indices to find + na_value : scalar + value to return for empty or all-null values + interpolation : str + + Returns + ------- + quantiles : scalar or array + """ + # mask is Union[ExtensionArray, ndarray] + values = values[~mask] + + if len(values) == 0: + if lib.is_scalar(q): + return na_value + else: + return np.array([na_value] * len(q), + dtype=values.dtype) + + return np.percentile(values, q, interpolation=interpolation) + + +def nanpercentile(values, q, axis, na_value, mask, ndim, interpolation): + """ + Wraper for np.percentile that skips missing values. + + Parameters + ---------- + values : array over which to find quantiles + q : scalar or array of quantile indices to find + axis : {0, 1} + na_value : scalar + value to return for empty or all-null values + mask : ndarray[bool] + locations in values that should be considered missing + ndim : {1, 2} + interpolation : str + + Returns + ------- + quantiles : scalar or array + """ + if not lib.is_scalar(mask) and mask.any(): + if ndim == 1: + return _nanpercentile_1d(values, mask, q, na_value, + interpolation=interpolation) + else: + # for nonconsolidatable blocks mask is 1D, but values 2D + if mask.ndim < values.ndim: + mask = mask.reshape(values.shape) + if axis == 0: + values = values.T + mask = mask.T + result = [_nanpercentile_1d(val, m, q, na_value, + interpolation=interpolation) + for (val, m) in zip(list(values), list(mask))] + result = np.array(result, dtype=values.dtype, copy=False).T + return result + else: + return np.percentile(values, q, axis=axis, interpolation=interpolation)
BlockManager.reduction is only ever called for quantile. Might as well remove the layer of indirection so we can simplify reduction (now renamed quantile). Most of the simplification comes in Block.quantile, since we can avoid passing around things we don't need. Two nested functions currently defined inside Block.quantile are moved outside the closure so I don't have to double-check the namespace every time I look at them. Not sure if they belong somewhere else.
https://api.github.com/repos/pandas-dev/pandas/pulls/24597
2019-01-03T18:03:27Z
2019-01-03T23:10:40Z
2019-01-03T23:10:40Z
2019-01-03T23:21:09Z
DEPR: __array__ for tz-aware Series/Index
diff --git a/doc/source/api/series.rst b/doc/source/api/series.rst index 7d5e6037b012a..8e4c378b9fefe 100644 --- a/doc/source/api/series.rst +++ b/doc/source/api/series.rst @@ -26,6 +26,7 @@ Attributes .. autosummary:: :toctree: generated/ + Series.array Series.values Series.dtype Series.ftype @@ -58,10 +59,12 @@ Conversion Series.convert_objects Series.copy Series.bool + Series.to_numpy Series.to_period Series.to_timestamp Series.to_list Series.get_values + Series.__array__ Indexing, iteration ------------------- diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 3be87c4cabaf0..f9a4a2b005045 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1227,7 +1227,7 @@ Deprecations .. _whatsnew_0240.deprecations.datetimelike_int_ops: Integer Addition/Subtraction with Datetimes and Timedeltas is Deprecated -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In the past, users could—in some cases—add or subtract integers or integer-dtype arrays from :class:`Timestamp`, :class:`DatetimeIndex` and :class:`TimedeltaIndex`. @@ -1265,6 +1265,74 @@ the object's ``freq`` attribute (:issue:`21939`, :issue:`23878`). dti = pd.date_range('2001-01-01', periods=2, freq='7D') dti + pd.Index([1 * dti.freq, 2 * dti.freq]) + +.. _whatsnew_0240.deprecations.tz_aware_array: + +Converting Timezone-Aware Series and Index to NumPy Arrays +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The conversion from a :class:`Series` or :class:`Index` with timezone-aware +datetime data will change to preserve timezones by default (:issue:`23569`). + +NumPy doesn't have a dedicated dtype for timezone-aware datetimes. +In the past, converting a :class:`Series` or :class:`DatetimeIndex` with +timezone-aware datatimes would convert to a NumPy array by + +1. converting the tz-aware data to UTC +2. dropping the timezone-info +3. 
returning a :class:`numpy.ndarray` with ``datetime64[ns]`` dtype + +Future versions of pandas will preserve the timezone information by returning an +object-dtype NumPy array where each value is a :class:`Timestamp` with the correct +timezone attached + +.. ipython:: python + + ser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) + ser + +The default behavior remains the same, but issues a warning + +.. code-block:: python + + In [8]: np.asarray(ser) + /bin/ipython:1: FutureWarning: Converting timezone-aware DatetimeArray to timezone-naive + ndarray with 'datetime64[ns]' dtype. In the future, this will return an ndarray + with 'object' dtype where each element is a 'pandas.Timestamp' with the correct 'tz'. + + To accept the future behavior, pass 'dtype=object'. + To keep the old behavior, pass 'dtype="datetime64[ns]"'. + #!/bin/python3 + Out[8]: + array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00.000000000'], + dtype='datetime64[ns]') + +The previous or future behavior can be obtained, without any warnings, by specifying +the ``dtype`` + +*Previous Behavior* + +.. ipython:: python + + np.asarray(ser, dtype='datetime64[ns]') + +*Future Behavior* + +.. ipython:: python + + # New behavior + np.asarray(ser, dtype=object) + + +Or by using :meth:`Series.to_numpy` + +.. ipython:: python + + ser.to_numpy() + ser.to_numpy(dtype="datetime64[ns]") + +All the above applies to a :class:`DatetimeIndex` with tz-aware values as well. + .. 
_whatsnew_0240.prior_deprecations: Removal of prior version deprecations/changes diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index a55e8759deedb..e6fbc6d1f4b15 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -524,7 +524,7 @@ def _resolution(self): # Array-Like / EA-Interface Methods def __array__(self, dtype=None): - if is_object_dtype(dtype): + if is_object_dtype(dtype) or (dtype is None and self.tz): return np.array(list(self), dtype=object) elif is_int64_dtype(dtype): return self.asi8 diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index b2d72eb49d2de..bd6094596c5e1 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1020,7 +1020,7 @@ def maybe_cast_to_datetime(value, dtype, errors='raise'): # datetime64tz is assumed to be naive which should # be localized to the timezone. is_dt_string = is_string_dtype(value) - value = to_datetime(value, errors=errors) + value = to_datetime(value, errors=errors).array if is_dt_string: # Strings here are naive, so directly localize value = value.tz_localize(dtype.tz) diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index e35ee32657509..79756d4c0cfab 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -403,6 +403,7 @@ def _hash_categories(categories, ordered=True): from pandas.core.util.hashing import ( hash_array, _combine_hash_arrays, hash_tuples ) + from pandas.core.dtypes.common import is_datetime64tz_dtype, _NS_DTYPE if len(categories) and isinstance(categories[0], tuple): # assumes if any individual category is a tuple, then all our. ATM @@ -420,6 +421,11 @@ def _hash_categories(categories, ordered=True): # find a better solution hashed = hash((tuple(categories), ordered)) return hashed + + if is_datetime64tz_dtype(categories.dtype): + # Avoid future warning. 
+ categories = categories.astype(_NS_DTYPE) + cat_array = hash_array(np.asarray(categories), categorize=False) if ordered: cat_array = np.vstack([ diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index b8b73b6aab1a5..e52ab66ef9cb4 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1271,8 +1271,8 @@ def f(self, **kwargs): def first_compat(x, axis=0): def first(x): + x = x.to_numpy() - x = np.asarray(x) x = x[notna(x)] if len(x) == 0: return np.nan @@ -1286,8 +1286,7 @@ def first(x): def last_compat(x, axis=0): def last(x): - - x = np.asarray(x) + x = x.to_numpy() x = x[notna(x)] if len(x) == 0: return np.nan diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index f396f081267b3..ab1ac45122658 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -339,6 +339,21 @@ def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None): # -------------------------------------------------------------------- + def __array__(self, dtype=None): + if (dtype is None and isinstance(self._data, DatetimeArray) + and getattr(self.dtype, 'tz', None)): + msg = ( + "Converting timezone-aware DatetimeArray to timezone-naive " + "ndarray with 'datetime64[ns]' dtype. In the future, this " + "will return an ndarray with 'object' dtype where each " + "element is a 'pandas.Timestamp' with the correct 'tz'.\n\t" + "To accept the future behavior, pass 'dtype=object'.\n\t" + "To keep the old behavior, pass 'dtype=\"datetime64[ns]\"'." 
+ ) + warnings.warn(msg, FutureWarning, stacklevel=3) + dtype = 'M8[ns]' + return np.asarray(self._data, dtype=dtype) + @property def dtype(self): return self._eadata.dtype @@ -1114,7 +1129,6 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None): strftime = ea_passthrough(DatetimeArray.strftime) _has_same_tz = ea_passthrough(DatetimeArray._has_same_tz) - __array__ = ea_passthrough(DatetimeArray.__array__) @property def offset(self): diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 3504c6e12b896..95bf776b1f19d 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -581,7 +581,12 @@ def can_do_equal_len(): setter(item, v) # we have an equal len ndarray/convertible to our labels - elif np.array(value).ndim == 2: + # hasattr first, to avoid coercing to ndarray without reason. + # But we may be relying on the ndarray coercion to check ndim. + # Why not just convert to an ndarray earlier on if needed? + elif ((hasattr(value, 'ndim') and value.ndim == 2) + or (not hasattr(value, 'ndim') and + np.array(value).ndim) == 2): # note that this coerces the dtype if we are mixed # GH 7551 diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index f88114e1c9e20..4b2f93451dad0 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1447,8 +1447,20 @@ def quantile(self, qs, interpolation='linear', axis=0): ------- Block """ - values = self.get_values() - values, _ = self._try_coerce_args(values, values) + if self.is_datetimetz: + # TODO: cleanup this special case. + # We need to operate on i8 values for datetimetz + # but `Block.get_values()` returns an ndarray of objects + # right now. 
We need an API for "values to do numeric-like ops on" + values = self.values.asi8 + + # TODO: NonConsolidatableMixin shape + # Usual shape inconsistencies for ExtensionBlocks + if self.ndim > 1: + values = values[None, :] + else: + values = self.get_values() + values, _ = self._try_coerce_args(values, values) is_empty = values.shape[axis] == 0 orig_scalar = not is_list_like(qs) @@ -2055,10 +2067,6 @@ def _na_value(self): def fill_value(self): return tslibs.iNaT - def to_dense(self): - # TODO(DatetimeBlock): remove - return np.asarray(self.values) - def get_values(self, dtype=None): """ return object dtype as boxed values, such as Timestamps/Timedelta @@ -2330,6 +2338,12 @@ def get_values(self, dtype=None): values = values.reshape(1, -1) return values + def to_dense(self): + # we request M8[ns] dtype here, even though it discards tzinfo, + # as lots of code (e.g. anything using values_from_object) + # expects that behavior. + return np.asarray(self.values, dtype=_NS_DTYPE) + def _slice(self, slicer): """ return a slice of my values """ if isinstance(slicer, tuple): diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 878a417b46674..7af347a141781 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -34,6 +34,7 @@ from pandas.core.indexes import base as ibase from pandas.core.internals import ( create_block_manager_from_arrays, create_block_manager_from_blocks) +from pandas.core.internals.arrays import extract_array # --------------------------------------------------------------------- # BlockManager Interface @@ -539,7 +540,6 @@ def sanitize_array(data, index, dtype=None, copy=False, Sanitize input data to an ndarray, copy if specified, coerce to the dtype if specified. 
""" - if dtype is not None: dtype = pandas_dtype(dtype) @@ -552,8 +552,10 @@ def sanitize_array(data, index, dtype=None, copy=False, else: data = data.copy() + data = extract_array(data, extract_numpy=True) + # GH#846 - if isinstance(data, (np.ndarray, Index, ABCSeries)): + if isinstance(data, np.ndarray): if dtype is not None: subarr = np.array(data, copy=False) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 89e191f171f97..cafd3a9915fa0 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -144,7 +144,9 @@ def f(values, axis=None, skipna=True, **kwds): def _bn_ok_dtype(dt, name): # Bottleneck chokes on datetime64 - if (not is_object_dtype(dt) and not is_datetime_or_timedelta_dtype(dt)): + if (not is_object_dtype(dt) and + not (is_datetime_or_timedelta_dtype(dt) or + is_datetime64tz_dtype(dt))): # GH 15507 # bottleneck does not properly upcast during the sum diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 6f95b14993228..15df0ca2442fa 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -8,7 +8,7 @@ from pandas._libs.lib import infer_dtype from pandas.core.dtypes.common import ( - ensure_int64, is_categorical_dtype, is_datetime64_dtype, + _NS_DTYPE, ensure_int64, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_integer, is_scalar, is_timedelta64_dtype) from pandas.core.dtypes.missing import isna @@ -226,7 +226,10 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, raise ValueError('Overlapping IntervalIndex is not accepted.') else: - bins = np.asarray(bins) + if is_datetime64tz_dtype(bins): + bins = np.asarray(bins, dtype=_NS_DTYPE) + else: + bins = np.asarray(bins) bins = _convert_bin_to_numeric_type(bins, dtype) if (np.diff(bins) < 0).any(): raise ValueError('bins must increase monotonically.') diff --git a/pandas/core/series.py b/pandas/core/series.py index de34227cda28a..04b8b1ed74d9c 100644 --- 
a/pandas/core/series.py +++ b/pandas/core/series.py @@ -21,7 +21,8 @@ is_extension_array_dtype, is_extension_type, is_hashable, is_integer, is_iterator, is_list_like, is_scalar, is_string_like, is_timedelta64_dtype) from pandas.core.dtypes.generic import ( - ABCDataFrame, ABCDatetimeIndex, ABCSeries, ABCSparseArray, ABCSparseSeries) + ABCDataFrame, ABCDatetimeArray, ABCDatetimeIndex, ABCSeries, + ABCSparseArray, ABCSparseSeries) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, remove_na_arraylike) @@ -661,11 +662,66 @@ def view(self, dtype=None): # ---------------------------------------------------------------------- # NDArray Compat - def __array__(self, result=None): + def __array__(self, dtype=None): """ - The array interface, return my values. - """ - return self.get_values() + Return the values as a NumPy array. + + Users should not call this directly. Rather, it is invoked by + :func:`numpy.array` and :func:`numpy.asarray`. + + Parameters + ---------- + dtype : str or numpy.dtype, optional + The dtype to use for the resulting NumPy array. By default, + the dtype is inferred from the data. + + Returns + ------- + numpy.ndarray + The values in the series converted to a :class:`numpy.ndarary` + with the specified `dtype`. + + See Also + -------- + pandas.array : Create a new array from data. + Series.array : Zero-copy view to the array backing the Series. + Series.to_numpy : Series method for similar behavior. 
+ + Examples + -------- + >>> ser = pd.Series([1, 2, 3]) + >>> np.asarray(ser) + array([1, 2, 3]) + + For timezone-aware data, the timezones may be retained with + ``dtype='object'`` + + >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) + >>> np.asarray(tzser, dtype="object") + array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'), + Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')], + dtype=object) + + Or the values may be localized to UTC and the tzinfo discared with + ``dtype='datetime64[ns]'`` + + >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS + array(['1999-12-31T23:00:00.000000000', ...], + dtype='datetime64[ns]') + """ + if (dtype is None and isinstance(self.array, ABCDatetimeArray) + and getattr(self.dtype, 'tz', None)): + msg = ( + "Converting timezone-aware DatetimeArray to timezone-naive " + "ndarray with 'datetime64[ns]' dtype. In the future, this " + "will return an ndarray with 'object' dtype where each " + "element is a 'pandas.Timestamp' with the correct 'tz'.\n\t" + "To accept the future behavior, pass 'dtype=object'.\n\t" + "To keep the old behavior, pass 'dtype=\"datetime64[ns]\"'." 
+ ) + warnings.warn(msg, FutureWarning, stacklevel=3) + dtype = 'M8[ns]' + return np.asarray(self.array, dtype) def __array_wrap__(self, result, context=None): """ diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index db88d94be1cab..8f8531ff97e69 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -262,11 +262,11 @@ def test_array(self, tz_naive_fixture): arr = DatetimeArray(dti) expected = dti.asi8.view('M8[ns]') - result = np.array(arr) + result = np.array(arr, dtype='M8[ns]') tm.assert_numpy_array_equal(result, expected) # check that we are not making copies when setting copy=False - result = np.array(arr, copy=False) + result = np.array(arr, dtype='M8[ns]', copy=False) assert result.base is expected.base assert result.base is not None diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index 8890593b1fa9d..72504fe09259e 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -178,6 +178,39 @@ def test_fillna_preserves_tz(self, method): assert arr[2] is pd.NaT assert dti[2] == pd.Timestamp('2000-01-03', tz='US/Central') + def test_array_interface_tz(self): + tz = "US/Central" + data = DatetimeArray(pd.date_range('2017', periods=2, tz=tz)) + result = np.asarray(data) + + expected = np.array([pd.Timestamp('2017-01-01T00:00:00', tz=tz), + pd.Timestamp('2017-01-02T00:00:00', tz=tz)], + dtype=object) + tm.assert_numpy_array_equal(result, expected) + + result = np.asarray(data, dtype=object) + tm.assert_numpy_array_equal(result, expected) + + result = np.asarray(data, dtype='M8[ns]') + + expected = np.array(['2017-01-01T06:00:00', + '2017-01-02T06:00:00'], dtype="M8[ns]") + tm.assert_numpy_array_equal(result, expected) + + def test_array_interface(self): + data = DatetimeArray(pd.date_range('2017', periods=2)) + expected = np.array(['2017-01-01T00:00:00', '2017-01-02T00:00:00'], + 
dtype='datetime64[ns]') + + result = np.asarray(data) + tm.assert_numpy_array_equal(result, expected) + + result = np.asarray(data, dtype=object) + expected = np.array([pd.Timestamp('2017-01-01T00:00:00'), + pd.Timestamp('2017-01-02T00:00:00')], + dtype=object) + tm.assert_numpy_array_equal(result, expected) + class TestSequenceToDT64NS(object): diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index 56c9395d0f802..965e5e000d026 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- from datetime import datetime -from warnings import catch_warnings, simplefilter +from warnings import catch_warnings, filterwarnings, simplefilter import numpy as np import pytest @@ -278,17 +278,20 @@ def test_array_equivalent(): TimedeltaIndex([0, np.nan])) assert not array_equivalent( TimedeltaIndex([0, np.nan]), TimedeltaIndex([1, np.nan])) - assert array_equivalent(DatetimeIndex([0, np.nan], tz='US/Eastern'), - DatetimeIndex([0, np.nan], tz='US/Eastern')) - assert not array_equivalent( - DatetimeIndex([0, np.nan], tz='US/Eastern'), DatetimeIndex( - [1, np.nan], tz='US/Eastern')) - assert not array_equivalent( - DatetimeIndex([0, np.nan]), DatetimeIndex( - [0, np.nan], tz='US/Eastern')) - assert not array_equivalent( - DatetimeIndex([0, np.nan], tz='CET'), DatetimeIndex( - [0, np.nan], tz='US/Eastern')) + with catch_warnings(): + filterwarnings("ignore", "Converting timezone", FutureWarning) + assert array_equivalent(DatetimeIndex([0, np.nan], tz='US/Eastern'), + DatetimeIndex([0, np.nan], tz='US/Eastern')) + assert not array_equivalent( + DatetimeIndex([0, np.nan], tz='US/Eastern'), DatetimeIndex( + [1, np.nan], tz='US/Eastern')) + assert not array_equivalent( + DatetimeIndex([0, np.nan]), DatetimeIndex( + [0, np.nan], tz='US/Eastern')) + assert not array_equivalent( + DatetimeIndex([0, np.nan], tz='CET'), DatetimeIndex( + [0, np.nan], tz='US/Eastern')) + assert not 
array_equivalent( DatetimeIndex([0, np.nan]), TimedeltaIndex([0, np.nan])) diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index e76de2ebedf67..e1ba0e1708442 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -392,3 +392,45 @@ def test_unique(self, arr, expected): # GH 21737 # Ensure the underlying data is consistent assert result[0] == expected[0] + + def test_asarray_tz_naive(self): + # This shouldn't produce a warning. + idx = pd.date_range('2000', periods=2) + # M8[ns] by default + with tm.assert_produces_warning(None): + result = np.asarray(idx) + + expected = np.array(['2000-01-01', '2000-01-02'], dtype='M8[ns]') + tm.assert_numpy_array_equal(result, expected) + + # optionally, object + with tm.assert_produces_warning(None): + result = np.asarray(idx, dtype=object) + + expected = np.array([pd.Timestamp('2000-01-01'), + pd.Timestamp('2000-01-02')]) + tm.assert_numpy_array_equal(result, expected) + + def test_asarray_tz_aware(self): + tz = 'US/Central' + idx = pd.date_range('2000', periods=2, tz=tz) + expected = np.array(['2000-01-01T06', '2000-01-02T06'], dtype='M8[ns]') + # We warn by default and return an ndarray[M8[ns]] + with tm.assert_produces_warning(FutureWarning): + result = np.asarray(idx) + + tm.assert_numpy_array_equal(result, expected) + + # Old behavior with no warning + with tm.assert_produces_warning(None): + result = np.asarray(idx, dtype="M8[ns]") + + tm.assert_numpy_array_equal(result, expected) + + # Future behavior with no warning + expected = np.array([pd.Timestamp("2000-01-01", tz=tz), + pd.Timestamp("2000-01-02", tz=tz)]) + with tm.assert_produces_warning(None): + result = np.asarray(idx, dtype=object) + + tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index fcb486d832c76..07808008c081c 100644 --- 
a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -1036,3 +1036,44 @@ def test_view_tz(self): 946879200000000000, 946965600000000000]) tm.assert_series_equal(result, expected) + + def test_asarray_tz_naive(self): + # This shouldn't produce a warning. + ser = pd.Series(pd.date_range('2000', periods=2)) + expected = np.array(['2000-01-01', '2000-01-02'], dtype='M8[ns]') + with tm.assert_produces_warning(None): + result = np.asarray(ser) + + tm.assert_numpy_array_equal(result, expected) + + # optionally, object + with tm.assert_produces_warning(None): + result = np.asarray(ser, dtype=object) + + expected = np.array([pd.Timestamp('2000-01-01'), + pd.Timestamp('2000-01-02')]) + tm.assert_numpy_array_equal(result, expected) + + def test_asarray_tz_aware(self): + tz = 'US/Central' + ser = pd.Series(pd.date_range('2000', periods=2, tz=tz)) + expected = np.array(['2000-01-01T06', '2000-01-02T06'], dtype='M8[ns]') + # We warn by default and return an ndarray[M8[ns]] + with tm.assert_produces_warning(FutureWarning): + result = np.asarray(ser) + + tm.assert_numpy_array_equal(result, expected) + + # Old behavior with no warning + with tm.assert_produces_warning(None): + result = np.asarray(ser, dtype="M8[ns]") + + tm.assert_numpy_array_equal(result, expected) + + # Future behavior with no warning + expected = np.array([pd.Timestamp("2000-01-01", tz=tz), + pd.Timestamp("2000-01-02", tz=tz)]) + with tm.assert_produces_warning(None): + result = np.asarray(ser, dtype=object) + + tm.assert_numpy_array_equal(result, expected)
This deprecates the current behvior when converting tz-aware Series or Index to an ndarray. Previously, we converted to M8[ns], throwing away the timezone information. In the future, we will return an object-dtype array filled with Timestamps, each of which has the correct tz. ```python In [1]: import pandas as pd; import numpy as np In [2]: ser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) In [3]: np.asarray(ser) /bin/ipython:1: FutureWarning: Converting timezone-aware DatetimeArray to timezone-naive ndarray with 'datetime64[ns]' dtype. In the future, this will return an ndarray with 'object' dtype where each element is a 'pandas.Timestamp' with the correct 'tz'. To accept the future behavior, pass 'dtype=object'. To keep the old behavior, pass 'dtype="datetime64[ns]"'. #!/Users/taugspurger/Envs/pandas-dev/bin/python3 Out[3]: array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00.000000000'], dtype='datetime64[ns]') ``` xref https://github.com/pandas-dev/pandas/issues/23569 closes https://github.com/pandas-dev/pandas/issues/15750
https://api.github.com/repos/pandas-dev/pandas/pulls/24596
2019-01-03T17:55:54Z
2019-01-05T14:51:12Z
2019-01-05T14:51:12Z
2019-12-30T20:16:47Z
DOC: fix to_numpy explanation for tz aware data
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 8dca000dfa969..73ae26150b946 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -99,27 +99,6 @@ are two possibly useful representations: Timezones may be preserved with ``dtype=object`` -.. ipython:: python - - ser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) - ser.to_numpy(dtype=object) - -Or thrown away with ``dtype='datetime64[ns]'`` - - ser.to_numpy(dtype="datetime64[ns]") - -:meth:`~Series.to_numpy` gives some control over the ``dtype`` of the -resulting :class:`ndarray`. For example, consider datetimes with timezones. -NumPy doesn't have a dtype to represent timezone-aware datetimes, so there -are two possibly useful representations: - -1. An object-dtype :class:`ndarray` with :class:`Timestamp` objects, each - with the correct ``tz`` -2. A ``datetime64[ns]`` -dtype :class:`ndarray`, where the values have - been converted to UTC and the timezone discarded - -Timezones may be preserved with ``dtype=object`` - .. ipython:: python ser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index a391d73b8922e..f56ad710973dd 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -2425,21 +2425,25 @@ a convert on an aware stamp. .. note:: Using :meth:`Series.to_numpy` on a ``Series``, returns a NumPy array of the data. - These values are converted to UTC, as NumPy does not currently support timezones (even though it is *printing* in the local timezone!). + NumPy does not currently support timezones (even though it is *printing* in the local timezone!), + therefore an object array of Timestamps is returned for timezone aware data: .. ipython:: python s_naive.to_numpy() s_aware.to_numpy() - Further note that once converted to a NumPy array these would lose the tz tenor. + By converting to an object array of Timestamps, it preserves the timezone + information. 
For example, when converting back to a Series: .. ipython:: python pd.Series(s_aware.to_numpy()) - However, these can be easily converted: + However, if you want an actual NumPy ``datetime64[ns]`` array (with the values + converted to UTC) instead of an array of objects, you can specify the + ``dtype`` argument: .. ipython:: python - pd.Series(s_aware.to_numpy()).dt.tz_localize('UTC').dt.tz_convert('US/Eastern') + s_aware.to_numpy(dtype='datetime64[ns]') diff --git a/pandas/core/base.py b/pandas/core/base.py index c37ab48de7cb8..c02ba88ea7fda 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -899,7 +899,6 @@ def to_numpy(self, dtype=None, copy=False): ``to_numpy()`` will return a NumPy array and the categorical dtype will be lost. - For NumPy dtypes, this will be a reference to the actual data stored in this Series or Index (assuming ``copy=False``). Modifying the result in place will modify the data stored in the Series or Index (not that @@ -910,7 +909,7 @@ def to_numpy(self, dtype=None, copy=False): expensive. When you need a no-copy reference to the underlying data, :attr:`Series.array` should be used instead. - This table lays out the different dtypes and return types of + This table lays out the different dtypes and default return types of ``to_numpy()`` for various dtypes within pandas. ================== ================================ @@ -920,6 +919,7 @@ def to_numpy(self, dtype=None, copy=False): period ndarray[object] (Periods) interval ndarray[object] (Intervals) IntegerNA ndarray[object] + datetime64[ns] datetime64[ns] datetime64[ns, tz] ndarray[object] (Timestamps) ================== ================================
Some clean-up now `to_numpy` preserves timezone and no longer converts to UTC datetime64 by default (after #24024), the example in timeseries.rst was failing due to that.
https://api.github.com/repos/pandas-dev/pandas/pulls/24595
2019-01-03T16:26:52Z
2019-01-03T21:08:46Z
2019-01-03T21:08:46Z
2019-01-03T22:49:17Z
Remove unhittable methods in internals
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index d12114bd951ba..3b2c13af785d4 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -26,7 +26,7 @@ is_re, is_re_compilable, is_sparse, is_timedelta64_dtype, pandas_dtype) import pandas.core.dtypes.concat as _concat from pandas.core.dtypes.dtypes import ( - CategoricalDtype, DatetimeTZDtype, ExtensionDtype, PandasExtensionDtype) + CategoricalDtype, ExtensionDtype, PandasExtensionDtype) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, ABCExtensionArray, ABCIndexClass, ABCSeries) @@ -1507,15 +1507,8 @@ def _nanpercentile(values, q, axis, **kw): len(values)).reshape(len(values), len(qs)) else: - - try: - result = _nanpercentile(values, np.array(qs) * 100, - axis=axis, **kw) - except ValueError: - - # older numpies don't handle an array for q - result = [_nanpercentile(values, q * 100, - axis=axis, **kw) for q in qs] + result = _nanpercentile(values, np.array(qs) * 100, + axis=axis, **kw) result = np.array(result, copy=False) if self.ndim > 1: @@ -1639,13 +1632,6 @@ def shape(self): return (len(self.values)), return (len(self.mgr_locs), len(self.values)) - def get_values(self, dtype=None): - """ need to to_dense myself (and always return a ndim sized object) """ - values = self.values.to_dense() - if values.ndim == self.ndim - 1: - values = values.reshape((1,) + values.shape) - return values - def iget(self, col): if self.ndim == 2 and isinstance(col, tuple): @@ -1700,49 +1686,9 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, new_values = self._try_coerce_result(new_values) return [self.make_block(values=new_values)] - def _slice(self, slicer): - """ return a slice of my values (but densify first) """ - return self.get_values()[slicer] - def _try_cast_result(self, result, dtype=None): return result - def _unstack(self, unstacker_func, new_columns, n_rows, fill_value): - """Return a list of unstacked blocks of self 
- - Parameters - ---------- - unstacker_func : callable - Partially applied unstacker. - new_columns : Index - All columns of the unstacked BlockManager. - n_rows : int - Only used in ExtensionBlock.unstack - fill_value : int - Only used in ExtensionBlock.unstack - - Returns - ------- - blocks : list of Block - New blocks of unstacked values. - mask : array_like of bool - The mask of columns of `blocks` we should keep. - """ - # NonConsolidatable blocks can have a single item only, so we return - # one block per item - unstacker = unstacker_func(self.values.T) - - new_placement, new_values, mask = self._get_unstack_items( - unstacker, new_columns - ) - - new_values = new_values.T[mask] - new_placement = new_placement[mask] - - blocks = [self.make_block_same_class(vals, [place]) - for vals, place in zip(new_values, new_placement)] - return blocks, mask - def _get_unstack_items(self, unstacker, new_columns): """ Get the placement, values, and mask for a Block unstack. @@ -2330,11 +2276,11 @@ def to_native_types(self, slicer=None, na_rep=None, date_format=None, i8values = i8values[..., slicer] from pandas.io.formats.format import _get_format_datetime64_from_values - format = _get_format_datetime64_from_values(values, date_format) + fmt = _get_format_datetime64_from_values(values, date_format) result = tslib.format_array_from_datetime( i8values.ravel(), tz=getattr(self.values, 'tz', None), - format=format, na_rep=na_rep).reshape(i8values.shape) + format=fmt, na_rep=na_rep).reshape(i8values.shape) return np.atleast_2d(result) def should_store(self, value): @@ -2400,8 +2346,6 @@ def _maybe_coerce_values(self, values, dtype=None): values = self._holder(values) if dtype is not None: - if isinstance(dtype, compat.string_types): - dtype = DatetimeTZDtype.construct_from_string(dtype) values = type(values)(values, dtype=dtype) if values.tz is None: diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 418046e42d581..b877ed93f07a2 100644 
--- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -3245,9 +3245,7 @@ def test_setitem(self): b1 = df._data.blocks[1] b2 = df._data.blocks[2] tm.assert_extension_array_equal(b1.values, b2.values) - if b1.values._data.base is not None: - # base being None suffices to assure a copy was made - assert id(b1.values._data.base) != id(b2.values._data.base) + assert id(b1.values._data.base) != id(b2.values._data.base) # with nan df2 = df.copy()
And one more 24024 in test cleanup
https://api.github.com/repos/pandas-dev/pandas/pulls/24594
2019-01-03T15:56:39Z
2019-01-03T17:21:20Z
2019-01-03T17:21:20Z
2019-01-03T18:26:47Z
DOC: hide warning from arrow about deprecated MultiIndex labels
diff --git a/doc/source/io.rst b/doc/source/io.rst index 967648f3a168a..2149ee7fb46d9 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -4647,6 +4647,7 @@ Write to a feather file. Read from a feather file. .. ipython:: python + :okwarning: result = pd.read_feather('example.feather') result @@ -4721,6 +4722,7 @@ Write to a parquet file. Read from a parquet file. .. ipython:: python + :okwarning: result = pd.read_parquet('example_fp.parquet', engine='fastparquet') result = pd.read_parquet('example_pa.parquet', engine='pyarrow') @@ -4791,6 +4793,7 @@ Partitioning Parquet files Parquet supports partitioning of data based on the values of one or more columns. .. ipython:: python + :okwarning: df = pd.DataFrame({'a': [0, 0, 1, 1], 'b': [0, 1, 0, 1]}) df.to_parquet(fname='test', engine='pyarrow',
This is fixed upstream in arrow (https://github.com/apache/arrow/pull/3120), but until a new release is there, avoiding the warning in our docs to have a cleaner doc build output.
https://api.github.com/repos/pandas-dev/pandas/pulls/24591
2019-01-03T13:24:08Z
2019-01-03T15:19:13Z
2019-01-03T15:19:13Z
2019-01-03T15:58:28Z
DOC: Bump fastparquet version
diff --git a/ci/deps/azure-macos-35.yaml b/ci/deps/azure-macos-35.yaml index b6dc2b3c27e8d..58abbabce3d86 100644 --- a/ci/deps/azure-macos-35.yaml +++ b/ci/deps/azure-macos-35.yaml @@ -14,7 +14,6 @@ dependencies: - numpy=1.12.0 - openpyxl=2.5.5 - pyarrow - - fastparquet - pytables - python=3.5* - pytz diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml index 817aab66c65aa..7b132a134c44e 100644 --- a/ci/deps/azure-windows-36.yaml +++ b/ci/deps/azure-windows-36.yaml @@ -6,7 +6,7 @@ dependencies: - blosc - bottleneck - boost-cpp<1.67 - - fastparquet + - fastparquet>=0.2.1 - matplotlib - numexpr - numpy=1.14* @@ -18,7 +18,6 @@ dependencies: - python=3.6.6 - pytz - scipy - - thrift=0.10* - xlrd - xlsxwriter - xlwt diff --git a/ci/deps/travis-27.yaml b/ci/deps/travis-27.yaml index 8d14673ebde6d..0f2194e71de31 100644 --- a/ci/deps/travis-27.yaml +++ b/ci/deps/travis-27.yaml @@ -6,7 +6,7 @@ dependencies: - beautifulsoup4 - bottleneck - cython=0.28.2 - - fastparquet + - fastparquet>=0.2.1 - gcsfs - html5lib - ipython diff --git a/ci/deps/travis-36-doc.yaml b/ci/deps/travis-36-doc.yaml index c345af0a2983c..26f3a17432ab2 100644 --- a/ci/deps/travis-36-doc.yaml +++ b/ci/deps/travis-36-doc.yaml @@ -6,7 +6,7 @@ dependencies: - beautifulsoup4 - bottleneck - cython>=0.28.2 - - fastparquet + - fastparquet>=0.2.1 - gitpython - html5lib - hypothesis>=3.58.0 diff --git a/ci/deps/travis-36.yaml b/ci/deps/travis-36.yaml index 1085ecd008fa6..74db888d588f4 100644 --- a/ci/deps/travis-36.yaml +++ b/ci/deps/travis-36.yaml @@ -7,7 +7,7 @@ dependencies: - botocore>=1.11 - cython>=0.28.2 - dask - - fastparquet + - fastparquet>=0.2.1 - gcsfs - geopandas - html5lib diff --git a/doc/source/install.rst b/doc/source/install.rst index e25c343a1cce0..fa3ff2f20b150 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -258,7 +258,7 @@ Optional Dependencies * `xarray <http://xarray.pydata.org>`__: pandas like handling for > 2 dims, needed for converting Panels to 
xarray objects. Version 0.7.0 or higher is recommended. * `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage, Version 3.4.2 or higher * `pyarrow <http://arrow.apache.org/docs/python/>`__ (>= 0.7.0): necessary for feather-based storage. -* `Apache Parquet <https://parquet.apache.org/>`__, either `pyarrow <http://arrow.apache.org/docs/python/>`__ (>= 0.7.0) or `fastparquet <https://fastparquet.readthedocs.io/en/latest>`__ (>= 0.1.2) for parquet-based storage. The `snappy <https://pypi.org/project/python-snappy>`__ and `brotli <https://pypi.org/project/brotlipy>`__ are available for compression support. +* `Apache Parquet <https://parquet.apache.org/>`__, either `pyarrow <http://arrow.apache.org/docs/python/>`__ (>= 0.7.0) or `fastparquet <https://fastparquet.readthedocs.io/en/latest>`__ (>= 0.2.1) for parquet-based storage. The `snappy <https://pypi.org/project/python-snappy>`__ and `brotli <https://pypi.org/project/brotlipy>`__ are available for compression support. * `SQLAlchemy <http://www.sqlalchemy.org>`__: for SQL database support. Version 0.8.1 or higher recommended. Besides SQLAlchemy, you also need a database specific driver. You can find an overview of supported drivers for each SQL dialect in the `SQLAlchemy docs <http://docs.sqlalchemy.org/en/latest/dialects/index.html>`__. 
Some common drivers are: * `psycopg2 <http://initd.org/psycopg/>`__: for PostgreSQL diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 3be87c4cabaf0..3a3fde2772b29 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -474,7 +474,7 @@ If installed, we now require: +-----------------+-----------------+----------+ | bottleneck | 1.2.0 | | +-----------------+-----------------+----------+ -| fastparquet | 0.1.2 | | +| fastparquet | 0.2.1 | | +-----------------+-----------------+----------+ | matplotlib | 2.0.0 | | +-----------------+-----------------+----------+ diff --git a/environment.yml b/environment.yml index 42da3e31de548..a980499029478 100644 --- a/environment.yml +++ b/environment.yml @@ -29,7 +29,7 @@ dependencies: - botocore>=1.11 - boto3 - bottleneck>=1.2.0 - - fastparquet>=0.1.2 + - fastparquet>=0.2.1 - html5lib - ipython>=5.6.0 - ipykernel diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 4e52c35c6b1e6..a40fe0c9aa74f 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -151,9 +151,9 @@ def __init__(self): "\nor via pip\n" "pip install -U fastparquet" ) - if LooseVersion(fastparquet.__version__) < '0.1.2': + if LooseVersion(fastparquet.__version__) < '0.2.1': raise ImportError( - "fastparquet >= 0.1.2 is required for parquet " + "fastparquet >= 0.2.1 is required for parquet " "support\n\n" "you can install via conda\n" "conda install fastparquet -c conda-forge\n" diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index d985ca4eb67ea..8833c6f7813c6 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -59,15 +59,6 @@ def fp(): return 'fastparquet' -@pytest.fixture -def fp_lt_014(): - if not _HAVE_FASTPARQUET: - pytest.skip("fastparquet is not installed") - if LooseVersion(fastparquet.__version__) >= LooseVersion('0.1.4'): - pytest.skip("fastparquet is >= 0.1.4") - return 'fastparquet' - - @pytest.fixture def 
df_compat(): return pd.DataFrame({'A': [1, 2, 3], 'B': 'foo'}) @@ -510,16 +501,6 @@ def test_categorical(self, fp): df = pd.DataFrame({'a': pd.Categorical(list('abc'))}) check_round_trip(df, fp) - def test_datetime_tz(self, fp_lt_014): - - # fastparquet<0.1.4 doesn't preserve tz - df = pd.DataFrame({'a': pd.date_range('20130101', periods=3, - tz='US/Eastern')}) - # warns on the coercion - with catch_warnings(record=True): - check_round_trip(df, fp_lt_014, - expected=df.astype('datetime64[ns]')) - def test_filter_row_groups(self, fp): d = {'a': list(range(0, 3))} df = pd.DataFrame(d) diff --git a/requirements-dev.txt b/requirements-dev.txt index a7aa0bacb5bd6..48bd95470d391 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -18,7 +18,7 @@ blosc botocore>=1.11 boto3 bottleneck>=1.2.0 -fastparquet>=0.1.2 +fastparquet>=0.2.1 html5lib ipython>=5.6.0 ipykernel
Should we just bump the min version to 0.2.1? It's quite new, but I think reading datetime data is somewhat common :) It'll save us bug reports.
https://api.github.com/repos/pandas-dev/pandas/pulls/24590
2019-01-03T13:17:47Z
2019-01-05T14:50:47Z
2019-01-05T14:50:47Z
2019-11-21T13:54:12Z
DOC: 32-bit warning
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 3a04789b609f8..f025478fbdc78 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -8,6 +8,9 @@ What's New in 0.24.0 (January XX, 2019) The 0.24.x series of releases will be the last to support Python 2. Future feature releases will support Python 3 only. See :ref:`install.dropping-27` for more. + The 0.24.x series of releases will be the last to support 32-bit Python. Future feature + releases will support 64-bit Python only. + {{ header }}
xref #15889
https://api.github.com/repos/pandas-dev/pandas/pulls/24588
2019-01-03T12:12:07Z
2019-01-08T13:01:45Z
null
2019-01-08T13:01:45Z
TST/REF: Add more pytest idiom to tests/tslib
diff --git a/pandas/tests/tslibs/test_array_to_datetime.py b/pandas/tests/tslibs/test_array_to_datetime.py index ff8880257b225..f5b036dde2094 100644 --- a/pandas/tests/tslibs/test_array_to_datetime.py +++ b/pandas/tests/tslibs/test_array_to_datetime.py @@ -12,159 +12,145 @@ import pandas.util.testing as tm -class TestParseISO8601(object): - @pytest.mark.parametrize('date_str, exp', [ - ('2011-01-02', datetime(2011, 1, 2)), - ('2011-1-2', datetime(2011, 1, 2)), - ('2011-01', datetime(2011, 1, 1)), - ('2011-1', datetime(2011, 1, 1)), - ('2011 01 02', datetime(2011, 1, 2)), - ('2011.01.02', datetime(2011, 1, 2)), - ('2011/01/02', datetime(2011, 1, 2)), - ('2011\\01\\02', datetime(2011, 1, 2)), - ('2013-01-01 05:30:00', datetime(2013, 1, 1, 5, 30)), - ('2013-1-1 5:30:00', datetime(2013, 1, 1, 5, 30))]) - def test_parsers_iso8601(self, date_str, exp): - # GH#12060 - # test only the iso parser - flexibility to different - # separators and leadings 0s - # Timestamp construction falls back to dateutil - actual = tslib._test_parse_iso8601(date_str) - assert actual == exp - - @pytest.mark.parametrize( - 'date_str', - ['2011-01/02', '2011^11^11', - '201401', '201111', '200101', - # mixed separated and unseparated - '2005-0101', '200501-01', - '20010101 12:3456', - '20010101 1234:56', - # HHMMSS must have two digits in - # each component if unseparated - '20010101 1', '20010101 123', - '20010101 12345', '20010101 12345Z', - # wrong separator for HHMMSS - '2001-01-01 12-34-56']) - def test_parsers_iso8601_invalid(self, date_str): - # separators must all match - YYYYMM not valid - with pytest.raises(ValueError): - tslib._test_parse_iso8601(date_str) - - -class TestArrayToDatetime(object): - def test_parsing_valid_dates(self): - arr = np.array(['01-01-2013', '01-02-2013'], dtype=object) - result, _ = tslib.array_to_datetime(arr) - expected = ['2013-01-01T00:00:00.000000000-0000', - '2013-01-02T00:00:00.000000000-0000'] - tm.assert_numpy_array_equal( - result, - 
np_array_datetime64_compat(expected, dtype='M8[ns]')) +@pytest.mark.parametrize("data,expected", [ + (["01-01-2013", "01-02-2013"], + ["2013-01-01T00:00:00.000000000-0000", + "2013-01-02T00:00:00.000000000-0000"]), + (["Mon Sep 16 2013", "Tue Sep 17 2013"], + ["2013-09-16T00:00:00.000000000-0000", + "2013-09-17T00:00:00.000000000-0000"]) +]) +def test_parsing_valid_dates(data, expected): + arr = np.array(data, dtype=object) + result, _ = tslib.array_to_datetime(arr) + + expected = np_array_datetime64_compat(expected, dtype="M8[ns]") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("dt_string, expected_tz", [ + ["01-01-2013 08:00:00+08:00", 480], + ["2013-01-01T08:00:00.000000000+0800", 480], + ["2012-12-31T16:00:00.000000000-0800", -480], + ["12-31-2012 23:00:00-01:00", -60] +]) +def test_parsing_timezone_offsets(dt_string, expected_tz): + # All of these datetime strings with offsets are equivalent + # to the same datetime after the timezone offset is added. 
+ arr = np.array(["01-01-2013 00:00:00"], dtype=object) + expected, _ = tslib.array_to_datetime(arr) + + arr = np.array([dt_string], dtype=object) + result, result_tz = tslib.array_to_datetime(arr) + + tm.assert_numpy_array_equal(result, expected) + assert result_tz is pytz.FixedOffset(expected_tz) + + +def test_parsing_non_iso_timezone_offset(): + dt_string = "01-01-2013T00:00:00.000000000+0000" + arr = np.array([dt_string], dtype=object) + + result, result_tz = tslib.array_to_datetime(arr) + expected = np.array([np.datetime64("2013-01-01 00:00:00.000000000")]) + + tm.assert_numpy_array_equal(result, expected) + assert result_tz is pytz.FixedOffset(0) + + +def test_parsing_different_timezone_offsets(): + # see gh-17697 + data = ["2015-11-18 15:30:00+05:30", "2015-11-18 15:30:00+06:30"] + data = np.array(data, dtype=object) + + result, result_tz = tslib.array_to_datetime(data) + expected = np.array([datetime(2015, 11, 18, 15, 30, + tzinfo=tzoffset(None, 19800)), + datetime(2015, 11, 18, 15, 30, + tzinfo=tzoffset(None, 23400))], + dtype=object) + + tm.assert_numpy_array_equal(result, expected) + assert result_tz is None + + +@pytest.mark.parametrize("data", [ + ["-352.737091", "183.575577"], + ["1", "2", "3", "4", "5"] +]) +def test_number_looking_strings_not_into_datetime(data): + # see gh-4601 + # + # These strings don't look like datetimes, so + # they shouldn't be attempted to be converted. 
+ arr = np.array(data, dtype=object) + result, _ = tslib.array_to_datetime(arr, errors="ignore") + + tm.assert_numpy_array_equal(result, arr) + + +@pytest.mark.parametrize("invalid_date", [ + date(1000, 1, 1), + datetime(1000, 1, 1), + "1000-01-01", + "Jan 1, 1000", + np.datetime64("1000-01-01")]) +@pytest.mark.parametrize("errors", ["coerce", "raise"]) +def test_coerce_outside_ns_bounds(invalid_date, errors): + arr = np.array([invalid_date], dtype="object") + kwargs = dict(values=arr, errors=errors) + + if errors == "raise": + msg = "Out of bounds nanosecond timestamp" + + with pytest.raises(ValueError, match=msg): + tslib.array_to_datetime(**kwargs) + else: # coerce. + result, _ = tslib.array_to_datetime(**kwargs) + expected = np.array([iNaT], dtype="M8[ns]") - arr = np.array(['Mon Sep 16 2013', 'Tue Sep 17 2013'], dtype=object) - result, _ = tslib.array_to_datetime(arr) - expected = ['2013-09-16T00:00:00.000000000-0000', - '2013-09-17T00:00:00.000000000-0000'] - tm.assert_numpy_array_equal( - result, - np_array_datetime64_compat(expected, dtype='M8[ns]')) - - @pytest.mark.parametrize('dt_string, expected_tz', [ - ['01-01-2013 08:00:00+08:00', pytz.FixedOffset(480)], - ['2013-01-01T08:00:00.000000000+0800', pytz.FixedOffset(480)], - ['2012-12-31T16:00:00.000000000-0800', pytz.FixedOffset(-480)], - ['12-31-2012 23:00:00-01:00', pytz.FixedOffset(-60)]]) - def test_parsing_timezone_offsets(self, dt_string, expected_tz): - # All of these datetime strings with offsets are equivalent - # to the same datetime after the timezone offset is added - arr = np.array(['01-01-2013 00:00:00'], dtype=object) - expected, _ = tslib.array_to_datetime(arr) - - arr = np.array([dt_string], dtype=object) - result, result_tz = tslib.array_to_datetime(arr) tm.assert_numpy_array_equal(result, expected) - assert result_tz is expected_tz - def test_parsing_non_iso_timezone_offset(self): - dt_string = '01-01-2013T00:00:00.000000000+0000' - arr = np.array([dt_string], dtype=object) - result, 
result_tz = tslib.array_to_datetime(arr) - expected = np.array([np.datetime64('2013-01-01 00:00:00.000000000')]) - tm.assert_numpy_array_equal(result, expected) - assert result_tz is pytz.FixedOffset(0) - - def test_parsing_different_timezone_offsets(self): - # GH 17697 - data = ["2015-11-18 15:30:00+05:30", "2015-11-18 15:30:00+06:30"] - data = np.array(data, dtype=object) - result, result_tz = tslib.array_to_datetime(data) - expected = np.array([datetime(2015, 11, 18, 15, 30, - tzinfo=tzoffset(None, 19800)), - datetime(2015, 11, 18, 15, 30, - tzinfo=tzoffset(None, 23400))], - dtype=object) - tm.assert_numpy_array_equal(result, expected) - assert result_tz is None - - def test_number_looking_strings_not_into_datetime(self): - # GH#4601 - # These strings don't look like datetimes so they shouldn't be - # attempted to be converted - arr = np.array(['-352.737091', '183.575577'], dtype=object) - result, _ = tslib.array_to_datetime(arr, errors='ignore') - tm.assert_numpy_array_equal(result, arr) - arr = np.array(['1', '2', '3', '4', '5'], dtype=object) - result, _ = tslib.array_to_datetime(arr, errors='ignore') - tm.assert_numpy_array_equal(result, arr) +def test_coerce_outside_ns_bounds_one_valid(): + arr = np.array(["1/1/1000", "1/1/2000"], dtype=object) + result, _ = tslib.array_to_datetime(arr, errors="coerce") - @pytest.mark.parametrize('invalid_date', [ - date(1000, 1, 1), - datetime(1000, 1, 1), - '1000-01-01', - 'Jan 1, 1000', - np.datetime64('1000-01-01')]) - def test_coerce_outside_ns_bounds(self, invalid_date): - arr = np.array([invalid_date], dtype='object') - with pytest.raises(ValueError): - tslib.array_to_datetime(arr, errors='raise') - - result, _ = tslib.array_to_datetime(arr, errors='coerce') - expected = np.array([iNaT], dtype='M8[ns]') - tm.assert_numpy_array_equal(result, expected) + expected = [iNaT, "2000-01-01T00:00:00.000000000-0000"] + expected = np_array_datetime64_compat(expected, dtype="M8[ns]") - def 
test_coerce_outside_ns_bounds_one_valid(self): - arr = np.array(['1/1/1000', '1/1/2000'], dtype=object) - result, _ = tslib.array_to_datetime(arr, errors='coerce') - expected = [iNaT, - '2000-01-01T00:00:00.000000000-0000'] - tm.assert_numpy_array_equal( - result, - np_array_datetime64_compat(expected, dtype='M8[ns]')) + tm.assert_numpy_array_equal(result, expected) - def test_coerce_of_invalid_datetimes(self): - arr = np.array(['01-01-2013', 'not_a_date', '1'], dtype=object) - # Without coercing, the presence of any invalid dates prevents - # any values from being converted - result, _ = tslib.array_to_datetime(arr, errors='ignore') - tm.assert_numpy_array_equal(result, arr) +@pytest.mark.parametrize("errors", ["ignore", "coerce"]) +def test_coerce_of_invalid_datetimes(errors): + arr = np.array(["01-01-2013", "not_a_date", "1"], dtype=object) + kwargs = dict(values=arr, errors=errors) + if errors == "ignore": + # Without coercing, the presence of any invalid + # dates prevents any values from being converted. + result, _ = tslib.array_to_datetime(**kwargs) + tm.assert_numpy_array_equal(result, arr) + else: # coerce. 
# With coercing, the invalid dates becomes iNaT - result, _ = tslib.array_to_datetime(arr, errors='coerce') - expected = ['2013-01-01T00:00:00.000000000-0000', + result, _ = tslib.array_to_datetime(arr, errors="coerce") + expected = ["2013-01-01T00:00:00.000000000-0000", iNaT, iNaT] tm.assert_numpy_array_equal( result, - np_array_datetime64_compat(expected, dtype='M8[ns]')) - - def test_to_datetime_barely_out_of_bounds(self): - # GH#19529 - # GH#19382 close enough to bounds that dropping nanos would result - # in an in-bounds datetime - arr = np.array(['2262-04-11 23:47:16.854775808'], dtype=object) - with pytest.raises(tslib.OutOfBoundsDatetime): - tslib.array_to_datetime(arr) + np_array_datetime64_compat(expected, dtype="M8[ns]")) + + +def test_to_datetime_barely_out_of_bounds(): + # see gh-19382, gh-19529 + # + # Close enough to bounds that dropping nanos + # would result in an in-bounds datetime. + arr = np.array(["2262-04-11 23:47:16.854775808"], dtype=object) + msg = "Out of bounds nanosecond timestamp: 2262-04-11 23:47:16" + + with pytest.raises(tslib.OutOfBoundsDatetime, match=msg): + tslib.array_to_datetime(arr) diff --git a/pandas/tests/tslibs/test_ccalendar.py b/pandas/tests/tslibs/test_ccalendar.py index b5d562a7b5a9c..255558a80018b 100644 --- a/pandas/tests/tslibs/test_ccalendar.py +++ b/pandas/tests/tslibs/test_ccalendar.py @@ -2,17 +2,24 @@ from datetime import datetime import numpy as np +import pytest from pandas._libs.tslibs import ccalendar -def test_get_day_of_year(): - assert ccalendar.get_day_of_year(2001, 3, 1) == 60 - assert ccalendar.get_day_of_year(2004, 3, 1) == 61 - assert ccalendar.get_day_of_year(1907, 12, 31) == 365 - assert ccalendar.get_day_of_year(2004, 12, 31) == 366 +@pytest.mark.parametrize("date_tuple,expected", [ + ((2001, 3, 1), 60), + ((2004, 3, 1), 61), + ((1907, 12, 31), 365), # End-of-year, non-leap year. + ((2004, 12, 31), 366), # End-of-year, leap year. 
+]) +def test_get_day_of_year_numeric(date_tuple, expected): + assert ccalendar.get_day_of_year(*date_tuple) == expected + +def test_get_day_of_year_dt(): dt = datetime.fromordinal(1 + np.random.randint(365 * 4000)) result = ccalendar.get_day_of_year(dt.year, dt.month, dt.day) + expected = (dt - dt.replace(month=1, day=1)).days + 1 assert result == expected diff --git a/pandas/tests/tslibs/test_conversion.py b/pandas/tests/tslibs/test_conversion.py index 6bfc686ba830e..13398a69b4982 100644 --- a/pandas/tests/tslibs/test_conversion.py +++ b/pandas/tests/tslibs/test_conversion.py @@ -11,61 +11,58 @@ import pandas.util.testing as tm -def compare_utc_to_local(tz_didx, utc_didx): - f = lambda x: conversion.tz_convert_single(x, UTC, tz_didx.tz) +def _compare_utc_to_local(tz_didx): + def f(x): + return conversion.tz_convert_single(x, UTC, tz_didx.tz) + result = conversion.tz_convert(tz_didx.asi8, UTC, tz_didx.tz) - result_single = np.vectorize(f)(tz_didx.asi8) - tm.assert_numpy_array_equal(result, result_single) + expected = np.vectorize(f)(tz_didx.asi8) + + tm.assert_numpy_array_equal(result, expected) -def compare_local_to_utc(tz_didx, utc_didx): - f = lambda x: conversion.tz_convert_single(x, tz_didx.tz, UTC) +def _compare_local_to_utc(tz_didx, utc_didx): + def f(x): + return conversion.tz_convert_single(x, tz_didx.tz, UTC) + result = conversion.tz_convert(utc_didx.asi8, tz_didx.tz, UTC) - result_single = np.vectorize(f)(utc_didx.asi8) - tm.assert_numpy_array_equal(result, result_single) - - -class TestTZConvert(object): - - @pytest.mark.parametrize('tz', ['UTC', 'Asia/Tokyo', - 'US/Eastern', 'Europe/Moscow']) - def test_tz_convert_single_matches_tz_convert_hourly(self, tz): - # US: 2014-03-09 - 2014-11-11 - # MOSCOW: 2014-10-26 / 2014-12-31 - tz_didx = date_range('2014-03-01', '2015-01-10', freq='H', tz=tz) - utc_didx = date_range('2014-03-01', '2015-01-10', freq='H') - compare_utc_to_local(tz_didx, utc_didx) - - # local tz to UTC can be differ in hourly (or higher) 
freqs because - # of DST - compare_local_to_utc(tz_didx, utc_didx) - - @pytest.mark.parametrize('tz', ['UTC', 'Asia/Tokyo', - 'US/Eastern', 'Europe/Moscow']) - @pytest.mark.parametrize('freq', ['D', 'A']) - def test_tz_convert_single_matches_tz_convert(self, tz, freq): - tz_didx = date_range('2000-01-01', '2020-01-01', freq=freq, tz=tz) - utc_didx = date_range('2000-01-01', '2020-01-01', freq=freq) - compare_utc_to_local(tz_didx, utc_didx) - compare_local_to_utc(tz_didx, utc_didx) - - @pytest.mark.parametrize('arr', [ - pytest.param(np.array([], dtype=np.int64), id='empty'), - pytest.param(np.array([iNaT], dtype=np.int64), id='all_nat')]) - def test_tz_convert_corner(self, arr): - result = conversion.tz_convert(arr, - timezones.maybe_get_tz('US/Eastern'), - timezones.maybe_get_tz('Asia/Tokyo')) - tm.assert_numpy_array_equal(result, arr) - - -class TestEnsureDatetime64NS(object): - @pytest.mark.parametrize('copy', [True, False]) - @pytest.mark.parametrize('dtype', ['M8[ns]', 'M8[s]']) - def test_length_zero_copy(self, dtype, copy): - arr = np.array([], dtype=dtype) - result = conversion.ensure_datetime64ns(arr, copy=copy) - if copy: - assert result.base is None - else: - assert result.base is arr + expected = np.vectorize(f)(utc_didx.asi8) + + tm.assert_numpy_array_equal(result, expected) + + +def test_tz_convert_single_matches_tz_convert_hourly(tz_aware_fixture): + tz = tz_aware_fixture + tz_didx = date_range("2014-03-01", "2015-01-10", freq="H", tz=tz) + utc_didx = date_range("2014-03-01", "2015-01-10", freq="H") + + _compare_utc_to_local(tz_didx) + _compare_local_to_utc(tz_didx, utc_didx) + + +@pytest.mark.parametrize("freq", ["D", "A"]) +def test_tz_convert_single_matches_tz_convert(tz_aware_fixture, freq): + tz = tz_aware_fixture + tz_didx = date_range("2000-01-01", "2020-01-01", freq=freq, tz=tz) + utc_didx = date_range("2000-01-01", "2020-01-01", freq=freq) + + _compare_utc_to_local(tz_didx) + _compare_local_to_utc(tz_didx, utc_didx) + + 
+@pytest.mark.parametrize("arr", [ + pytest.param(np.array([], dtype=np.int64), id="empty"), + pytest.param(np.array([iNaT], dtype=np.int64), id="all_nat")]) +def test_tz_convert_corner(arr): + result = conversion.tz_convert(arr, + timezones.maybe_get_tz("US/Eastern"), + timezones.maybe_get_tz("Asia/Tokyo")) + tm.assert_numpy_array_equal(result, arr) + + +@pytest.mark.parametrize("copy", [True, False]) +@pytest.mark.parametrize("dtype", ["M8[ns]", "M8[s]"]) +def test_length_zero_copy(dtype, copy): + arr = np.array([], dtype=dtype) + result = conversion.ensure_datetime64ns(arr, copy=copy) + assert result.base is (None if copy else arr) diff --git a/pandas/tests/tslibs/test_libfrequencies.py b/pandas/tests/tslibs/test_libfrequencies.py index 1bf6d0596e2fe..b9b1c72dbf2e1 100644 --- a/pandas/tests/tslibs/test_libfrequencies.py +++ b/pandas/tests/tslibs/test_libfrequencies.py @@ -9,108 +9,92 @@ from pandas.tseries import offsets -def assert_aliases_deprecated(freq, expected, aliases): +@pytest.mark.parametrize("obj,expected", [ + ("W", "DEC"), + (offsets.Week(), "DEC"), + + ("D", "DEC"), + (offsets.Day(), "DEC"), + + ("Q", "DEC"), + (offsets.QuarterEnd(startingMonth=12), "DEC"), + + ("Q-JAN", "JAN"), + (offsets.QuarterEnd(startingMonth=1), "JAN"), + + ("A-DEC", "DEC"), + ("Y-DEC", "DEC"), + (offsets.YearEnd(), "DEC"), + + ("A-MAY", "MAY"), + ("Y-MAY", "MAY"), + (offsets.YearEnd(month=5), "MAY") +]) +def test_get_rule_month(obj, expected): + result = get_rule_month(obj) + assert result == expected + + +@pytest.mark.parametrize("obj,expected", [ + ("A", 1000), + ("A-DEC", 1000), + ("A-JAN", 1001), + + ("Y", 1000), + ("Y-DEC", 1000), + ("Y-JAN", 1001), + + ("Q", 2000), + ("Q-DEC", 2000), + ("Q-FEB", 2002), + + ("W", 4000), + ("W-SUN", 4000), + ("W-FRI", 4005), + + ("Min", 8000), + ("ms", 10000), + ("US", 11000), + ("NS", 12000) +]) +def test_period_str_to_code(obj, expected): + assert _period_str_to_code(obj) == expected + + +@pytest.mark.parametrize("p1,p2,expected", [ + 
# Input validation. + (offsets.MonthEnd(), None, False), + (offsets.YearEnd(), None, False), + (None, offsets.YearEnd(), False), + (None, offsets.MonthEnd(), False), + (None, None, False), + + (offsets.YearEnd(), offsets.MonthEnd(), True), + (offsets.Hour(), offsets.Minute(), True), + (offsets.Second(), offsets.Milli(), True), + (offsets.Milli(), offsets.Micro(), True), + (offsets.Micro(), offsets.Nano(), True) +]) +def test_super_sub_symmetry(p1, p2, expected): + assert is_superperiod(p1, p2) is expected + assert is_subperiod(p2, p1) is expected + + +@pytest.mark.parametrize("freq,expected,aliases", [ + ("D", 6000, ["DAY", "DLY", "DAILY"]), + ("M", 3000, ["MTH", "MONTH", "MONTHLY"]), + ("N", 12000, ["NANOSECOND", "NANOSECONDLY"]), + ("H", 7000, ["HR", "HOUR", "HRLY", "HOURLY"]), + ("T", 8000, ["minute", "MINUTE", "MINUTELY"]), + ("L", 10000, ["MILLISECOND", "MILLISECONDLY"]), + ("U", 11000, ["MICROSECOND", "MICROSECONDLY"]), + ("S", 9000, ["sec", "SEC", "SECOND", "SECONDLY"]), + ("B", 5000, ["BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY"]), +]) +def test_assert_aliases_deprecated(freq, expected, aliases): assert isinstance(aliases, list) - assert (_period_str_to_code(freq) == expected) + assert _period_str_to_code(freq) == expected for alias in aliases: with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG): _period_str_to_code(alias) - - -def test_get_rule_month(): - result = get_rule_month('W') - assert (result == 'DEC') - result = get_rule_month(offsets.Week()) - assert (result == 'DEC') - - result = get_rule_month('D') - assert (result == 'DEC') - result = get_rule_month(offsets.Day()) - assert (result == 'DEC') - - result = get_rule_month('Q') - assert (result == 'DEC') - result = get_rule_month(offsets.QuarterEnd(startingMonth=12)) - - result = get_rule_month('Q-JAN') - assert (result == 'JAN') - result = get_rule_month(offsets.QuarterEnd(startingMonth=1)) - assert (result == 'JAN') - - result = get_rule_month('A-DEC') - assert (result == 'DEC') - result = 
get_rule_month('Y-DEC') - assert (result == 'DEC') - result = get_rule_month(offsets.YearEnd()) - assert (result == 'DEC') - - result = get_rule_month('A-MAY') - assert (result == 'MAY') - result = get_rule_month('Y-MAY') - assert (result == 'MAY') - result = get_rule_month(offsets.YearEnd(month=5)) - assert (result == 'MAY') - - -def test_period_str_to_code(): - assert (_period_str_to_code('A') == 1000) - assert (_period_str_to_code('A-DEC') == 1000) - assert (_period_str_to_code('A-JAN') == 1001) - assert (_period_str_to_code('Y') == 1000) - assert (_period_str_to_code('Y-DEC') == 1000) - assert (_period_str_to_code('Y-JAN') == 1001) - - assert (_period_str_to_code('Q') == 2000) - assert (_period_str_to_code('Q-DEC') == 2000) - assert (_period_str_to_code('Q-FEB') == 2002) - - assert_aliases_deprecated("M", 3000, ["MTH", "MONTH", "MONTHLY"]) - - assert (_period_str_to_code('W') == 4000) - assert (_period_str_to_code('W-SUN') == 4000) - assert (_period_str_to_code('W-FRI') == 4005) - - assert_aliases_deprecated("B", 5000, ["BUS", "BUSINESS", - "BUSINESSLY", "WEEKDAY"]) - assert_aliases_deprecated("D", 6000, ["DAY", "DLY", "DAILY"]) - assert_aliases_deprecated("H", 7000, ["HR", "HOUR", "HRLY", "HOURLY"]) - - assert_aliases_deprecated("T", 8000, ["minute", "MINUTE", "MINUTELY"]) - assert (_period_str_to_code('Min') == 8000) - - assert_aliases_deprecated("S", 9000, ["sec", "SEC", "SECOND", "SECONDLY"]) - assert_aliases_deprecated("L", 10000, ["MILLISECOND", "MILLISECONDLY"]) - assert (_period_str_to_code('ms') == 10000) - - assert_aliases_deprecated("U", 11000, ["MICROSECOND", "MICROSECONDLY"]) - assert (_period_str_to_code('US') == 11000) - - assert_aliases_deprecated("N", 12000, ["NANOSECOND", "NANOSECONDLY"]) - assert (_period_str_to_code('NS') == 12000) - - -def test_is_superperiod_subperiod(): - - # input validation - assert not (is_superperiod(offsets.YearEnd(), None)) - assert not (is_subperiod(offsets.MonthEnd(), None)) - assert not (is_superperiod(None, 
offsets.YearEnd())) - assert not (is_subperiod(None, offsets.MonthEnd())) - assert not (is_superperiod(None, None)) - assert not (is_subperiod(None, None)) - - assert (is_superperiod(offsets.YearEnd(), offsets.MonthEnd())) - assert (is_subperiod(offsets.MonthEnd(), offsets.YearEnd())) - - assert (is_superperiod(offsets.Hour(), offsets.Minute())) - assert (is_subperiod(offsets.Minute(), offsets.Hour())) - - assert (is_superperiod(offsets.Second(), offsets.Milli())) - assert (is_subperiod(offsets.Milli(), offsets.Second())) - - assert (is_superperiod(offsets.Milli(), offsets.Micro())) - assert (is_subperiod(offsets.Micro(), offsets.Milli())) - - assert (is_superperiod(offsets.Micro(), offsets.Nano())) - assert (is_subperiod(offsets.Nano(), offsets.Micro())) diff --git a/pandas/tests/tslibs/test_liboffsets.py b/pandas/tests/tslibs/test_liboffsets.py index 388df6453634e..cb699278595e7 100644 --- a/pandas/tests/tslibs/test_liboffsets.py +++ b/pandas/tests/tslibs/test_liboffsets.py @@ -12,161 +12,163 @@ from pandas import Timestamp -def test_get_lastbday(): +@pytest.fixture(params=["start", "end", "business_start", "business_end"]) +def day_opt(request): + return request.param + + +@pytest.mark.parametrize("dt,exp_week_day,exp_last_day", [ + (datetime(2017, 11, 30), 3, 30), # Business day. + (datetime(1993, 10, 31), 6, 29) # Non-business day. +]) +def test_get_last_bday(dt, exp_week_day, exp_last_day): + assert dt.weekday() == exp_week_day + assert liboffsets.get_lastbday(dt.year, dt.month) == exp_last_day + + +@pytest.mark.parametrize("dt,exp_week_day,exp_first_day", [ + (datetime(2017, 4, 1), 5, 3), # Non-weekday. + (datetime(1993, 10, 1), 4, 1) # Business day. 
+]) +def test_get_first_bday(dt, exp_week_day, exp_first_day): + assert dt.weekday() == exp_week_day + assert liboffsets.get_firstbday(dt.year, dt.month) == exp_first_day + + +@pytest.mark.parametrize("months,day_opt,expected", [ + (0, 15, datetime(2017, 11, 15)), + (0, None, datetime(2017, 11, 30)), + (1, "start", datetime(2017, 12, 1)), + (-145, "end", datetime(2005, 10, 31)), + (0, "business_end", datetime(2017, 11, 30)), + (0, "business_start", datetime(2017, 11, 1)) +]) +def test_shift_month_dt(months, day_opt, expected): dt = datetime(2017, 11, 30) - assert dt.weekday() == 3 # i.e. this is a business day - assert liboffsets.get_lastbday(dt.year, dt.month) == 30 + assert liboffsets.shift_month(dt, months, day_opt=day_opt) == expected - dt = datetime(1993, 10, 31) - assert dt.weekday() == 6 # i.e. this is not a business day - assert liboffsets.get_lastbday(dt.year, dt.month) == 29 +@pytest.mark.parametrize("months,day_opt,expected", [ + (1, "start", Timestamp("1929-06-01")), + (-3, "end", Timestamp("1929-02-28")), + (25, None, Timestamp("1931-06-5")), + (-1, 31, Timestamp("1929-04-30")) +]) +def test_shift_month_ts(months, day_opt, expected): + ts = Timestamp("1929-05-05") + assert liboffsets.shift_month(ts, months, day_opt=day_opt) == expected -def test_get_firstbday(): - dt = datetime(2017, 4, 1) - assert dt.weekday() == 5 # i.e. not a weekday - assert liboffsets.get_firstbday(dt.year, dt.month) == 3 - - dt = datetime(1993, 10, 1) - assert dt.weekday() == 4 # i.e. 
a business day - assert liboffsets.get_firstbday(dt.year, dt.month) == 1 - - -def test_shift_month(): - dt = datetime(2017, 11, 30) - assert liboffsets.shift_month(dt, 0, 'business_end') == dt - assert liboffsets.shift_month(dt, 0, - 'business_start') == datetime(2017, 11, 1) - - ts = Timestamp('1929-05-05') - assert liboffsets.shift_month(ts, 1, 'start') == Timestamp('1929-06-01') - assert liboffsets.shift_month(ts, -3, 'end') == Timestamp('1929-02-28') - - assert liboffsets.shift_month(ts, 25, None) == Timestamp('1931-06-5') - - # Try to shift to April 31, then shift back to Apr 30 to get a real date - assert liboffsets.shift_month(ts, -1, 31) == Timestamp('1929-04-30') +def test_shift_month_error(): dt = datetime(2017, 11, 15) + day_opt = "this should raise" - assert liboffsets.shift_month(dt, 0, day_opt=None) == dt - assert liboffsets.shift_month(dt, 0, day_opt=15) == dt + with pytest.raises(ValueError, match=day_opt): + liboffsets.shift_month(dt, 3, day_opt=day_opt) - assert liboffsets.shift_month(dt, 1, - day_opt='start') == datetime(2017, 12, 1) - assert liboffsets.shift_month(dt, -145, - day_opt='end') == datetime(2005, 10, 31) +@pytest.mark.parametrize("other,expected", [ + # Before March 1. + (datetime(2017, 2, 10), {2: 1, -7: -7, 0: 0}), - with pytest.raises(ValueError): - liboffsets.shift_month(dt, 3, day_opt='this should raise') + # After March 1. + (Timestamp("2014-03-15", tz="US/Eastern"), {2: 2, -7: -6, 0: 1}) +]) +@pytest.mark.parametrize("n", [2, -7, 0]) +def test_roll_yearday(other, expected, n): + month = 3 + day_opt = "start" # `other` will be compared to March 1. 
+ assert liboffsets.roll_yearday(other, n, month, day_opt) == expected[n] -def test_get_day_of_month(): - # get_day_of_month is not directly exposed; we test it via roll_yearday - dt = datetime(2017, 11, 15) - with pytest.raises(ValueError): - # To hit the raising case we need month == dt.month and n > 0 - liboffsets.roll_yearday(dt, n=3, month=11, day_opt='foo') +@pytest.mark.parametrize("other,expected", [ + # Before June 30. + (datetime(1999, 6, 29), {5: 4, -7: -7, 0: 0}), + # After June 30. + (Timestamp(2072, 8, 24, 6, 17, 18), {5: 5, -7: -6, 0: 1}) +]) +@pytest.mark.parametrize("n", [5, -7, 0]) +def test_roll_yearday2(other, expected, n): + month = 6 + day_opt = "end" # `other` will be compared to June 30. -def test_roll_yearday(): - # Copied from doctest examples - month = 3 - day_opt = 'start' # `other` will be compared to March 1 - other = datetime(2017, 2, 10) # before March 1 - assert liboffsets.roll_yearday(other, 2, month, day_opt) == 1 - assert liboffsets.roll_yearday(other, -7, month, day_opt) == -7 - assert liboffsets.roll_yearday(other, 0, month, day_opt) == 0 + assert liboffsets.roll_yearday(other, n, month, day_opt) == expected[n] - other = Timestamp('2014-03-15', tz='US/Eastern') # after March 1 - assert liboffsets.roll_yearday(other, 2, month, day_opt) == 2 - assert liboffsets.roll_yearday(other, -7, month, day_opt) == -6 - assert liboffsets.roll_yearday(other, 0, month, day_opt) == 1 - month = 6 - day_opt = 'end' # `other` will be compared to June 30 - other = datetime(1999, 6, 29) # before June 30 - assert liboffsets.roll_yearday(other, 5, month, day_opt) == 4 - assert liboffsets.roll_yearday(other, -7, month, day_opt) == -7 - assert liboffsets.roll_yearday(other, 0, month, day_opt) == 0 - - other = Timestamp(2072, 8, 24, 6, 17, 18) # after June 30 - assert liboffsets.roll_yearday(other, 5, month, day_opt) == 5 - assert liboffsets.roll_yearday(other, -7, month, day_opt) == -6 - assert liboffsets.roll_yearday(other, 0, month, day_opt) == 1 - - 
-def test_roll_qtrday(): - other = Timestamp(2072, 10, 1, 6, 17, 18) # Saturday - for day_opt in ['start', 'end', 'business_start', 'business_end']: - # as long as (other.month % 3) != (month % 3), day_opt is irrelevant - # the `day_opt` doesn't matter. - month = 5 # (other.month % 3) < (month % 3) - assert roll_qtrday(other, 4, month, day_opt, modby=3) == 3 - assert roll_qtrday(other, -3, month, day_opt, modby=3) == -3 - - month = 3 # (other.month % 3) > (month % 3) - assert roll_qtrday(other, 4, month, day_opt, modby=3) == 4 - assert roll_qtrday(other, -3, month, day_opt, modby=3) == -2 - - month = 2 - other = datetime(1999, 5, 31) # Monday - # has (other.month % 3) == (month % 3) - - n = 2 - assert roll_qtrday(other, n, month, 'start', modby=3) == n - assert roll_qtrday(other, n, month, 'end', modby=3) == n - assert roll_qtrday(other, n, month, 'business_start', modby=3) == n - assert roll_qtrday(other, n, month, 'business_end', modby=3) == n - - n = -1 - assert roll_qtrday(other, n, month, 'start', modby=3) == n + 1 - assert roll_qtrday(other, n, month, 'end', modby=3) == n - assert roll_qtrday(other, n, month, 'business_start', modby=3) == n + 1 - assert roll_qtrday(other, n, month, 'business_end', modby=3) == n - - other = Timestamp(2072, 10, 1, 6, 17, 18) # Saturday - month = 4 # (other.month % 3) == (month % 3) - n = 2 - assert roll_qtrday(other, n, month, 'start', modby=3) == n - assert roll_qtrday(other, n, month, 'end', modby=3) == n - 1 - assert roll_qtrday(other, n, month, 'business_start', modby=3) == n - 1 - assert roll_qtrday(other, n, month, 'business_end', modby=3) == n - 1 - - n = -1 - assert roll_qtrday(other, n, month, 'start', modby=3) == n - assert roll_qtrday(other, n, month, 'end', modby=3) == n - assert roll_qtrday(other, n, month, 'business_start', modby=3) == n - assert roll_qtrday(other, n, month, 'business_end', modby=3) == n - - other = Timestamp(2072, 10, 3, 6, 17, 18) # First businessday - month = 4 # (other.month % 3) == (month % 
3) - n = 2 - assert roll_qtrday(other, n, month, 'start', modby=3) == n - assert roll_qtrday(other, n, month, 'end', modby=3) == n - 1 - assert roll_qtrday(other, n, month, 'business_start', modby=3) == n - assert roll_qtrday(other, n, month, 'business_end', modby=3) == n - 1 - - n = -1 - assert roll_qtrday(other, n, month, 'start', modby=3) == n + 1 - assert roll_qtrday(other, n, month, 'end', modby=3) == n - assert roll_qtrday(other, n, month, 'business_start', modby=3) == n - assert roll_qtrday(other, n, month, 'business_end', modby=3) == n - - -def test_roll_convention(): - other = 29 - before = 1 - after = 31 - - n = 42 - assert liboffsets.roll_convention(other, n, other) == n - assert liboffsets.roll_convention(other, n, before) == n - assert liboffsets.roll_convention(other, n, after) == n - 1 - - n = -4 - assert liboffsets.roll_convention(other, n, other) == n - assert liboffsets.roll_convention(other, n, before) == n + 1 - assert liboffsets.roll_convention(other, n, after) == n +def test_get_day_of_month_error(): + # get_day_of_month is not directly exposed. + # We test it via roll_yearday. + dt = datetime(2017, 11, 15) + day_opt = "foo" + + with pytest.raises(ValueError, match=day_opt): + # To hit the raising case we need month == dt.month and n > 0. + liboffsets.roll_yearday(dt, n=3, month=11, day_opt=day_opt) + + +@pytest.mark.parametrize("month", [ + 3, # (other.month % 3) < (month % 3) + 5 # (other.month % 3) > (month % 3) +]) +@pytest.mark.parametrize("n", [4, -3]) +def test_roll_qtr_day_not_mod_unequal(day_opt, month, n): + expected = { + 3: { + -3: -2, + 4: 4 + }, + 5: { + -3: -3, + 4: 3 + } + } + + other = Timestamp(2072, 10, 1, 6, 17, 18) # Saturday. + assert roll_qtrday(other, n, month, day_opt, modby=3) == expected[month][n] + + +@pytest.mark.parametrize("other,month,exp_dict", [ + # Monday. + (datetime(1999, 5, 31), 2, { + -1: { + "start": 0, + "business_start": 0 + } + }), + + # Saturday. 
+ (Timestamp(2072, 10, 1, 6, 17, 18), 4, { + 2: { + "end": 1, + "business_end": 1, + "business_start": 1 + } + }), + + # First business day. + (Timestamp(2072, 10, 3, 6, 17, 18), 4, { + 2: { + "end": 1, + "business_end": 1 + }, + -1: { + "start": 0 + } + }) +]) +@pytest.mark.parametrize("n", [2, -1]) +def test_roll_qtr_day_mod_equal(other, month, exp_dict, n, day_opt): + # All cases have (other.month % 3) == (month % 3). + expected = exp_dict.get(n, {}).get(day_opt, n) + assert roll_qtrday(other, n, month, day_opt, modby=3) == expected + + +@pytest.mark.parametrize("n,expected", [ + (42, {29: 42, 1: 42, 31: 41}), + (-4, {29: -4, 1: -3, 31: -4}) +]) +@pytest.mark.parametrize("compare", [29, 1, 31]) +def test_roll_convention(n, expected, compare): + assert liboffsets.roll_convention(29, n, compare) == expected[compare] diff --git a/pandas/tests/tslibs/test_normalize_date.py b/pandas/tests/tslibs/test_normalize_date.py new file mode 100644 index 0000000000000..6124121b97186 --- /dev/null +++ b/pandas/tests/tslibs/test_normalize_date.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +"""Tests for functions from pandas._libs.tslibs""" + +from datetime import date, datetime + +import pytest + +from pandas._libs import tslibs + + +@pytest.mark.parametrize("value,expected", [ + (date(2012, 9, 7), datetime(2012, 9, 7)), + (datetime(2012, 9, 7, 12), datetime(2012, 9, 7)), + (datetime(2007, 10, 1, 1, 12, 5, 10), datetime(2007, 10, 1)) +]) +def test_normalize_date(value, expected): + result = tslibs.normalize_date(value) + assert result == expected diff --git a/pandas/tests/tslibs/test_parse_iso8601.py b/pandas/tests/tslibs/test_parse_iso8601.py new file mode 100644 index 0000000000000..d1b3dee948afe --- /dev/null +++ b/pandas/tests/tslibs/test_parse_iso8601.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +from datetime import datetime + +import pytest + +from pandas._libs import tslib + + +@pytest.mark.parametrize("date_str, exp", [ + ("2011-01-02", datetime(2011, 1, 2)), + 
("2011-1-2", datetime(2011, 1, 2)), + ("2011-01", datetime(2011, 1, 1)), + ("2011-1", datetime(2011, 1, 1)), + ("2011 01 02", datetime(2011, 1, 2)), + ("2011.01.02", datetime(2011, 1, 2)), + ("2011/01/02", datetime(2011, 1, 2)), + ("2011\\01\\02", datetime(2011, 1, 2)), + ("2013-01-01 05:30:00", datetime(2013, 1, 1, 5, 30)), + ("2013-1-1 5:30:00", datetime(2013, 1, 1, 5, 30))]) +def test_parsers_iso8601(date_str, exp): + # see gh-12060 + # + # Test only the ISO parser - flexibility to + # different separators and leading zero's. + actual = tslib._test_parse_iso8601(date_str) + assert actual == exp + + +@pytest.mark.parametrize("date_str", [ + "2011-01/02", + "2011=11=11", + "201401", + "201111", + "200101", + + # Mixed separated and unseparated. + "2005-0101", + "200501-01", + "20010101 12:3456", + "20010101 1234:56", + + # HHMMSS must have two digits in + # each component if unseparated. + "20010101 1", + "20010101 123", + "20010101 12345", + "20010101 12345Z", +]) +def test_parsers_iso8601_invalid(date_str): + msg = "Error parsing datetime string \"{s}\"".format(s=date_str) + + with pytest.raises(ValueError, match=msg): + tslib._test_parse_iso8601(date_str) + + +def test_parsers_iso8601_invalid_offset_invalid(): + date_str = "2001-01-01 12-34-56" + msg = ("Timezone hours offset out of range " + "in datetime string \"{s}\"".format(s=date_str)) + + with pytest.raises(ValueError, match=msg): + tslib._test_parse_iso8601(date_str) diff --git a/pandas/tests/tslibs/test_parsing.py b/pandas/tests/tslibs/test_parsing.py index 45a841cd1136d..597ec6df7389f 100644 --- a/pandas/tests/tslibs/test_parsing.py +++ b/pandas/tests/tslibs/test_parsing.py @@ -10,168 +10,177 @@ from pandas._libs.tslibs import parsing from pandas._libs.tslibs.parsing import parse_time_string -import pandas.compat as compat import pandas.util._test_decorators as td from pandas.util import testing as tm -class TestParseQuarters(object): - - def test_parse_time_string(self): - (date, parsed, reso) = 
parse_time_string('4Q1984') - (date_lower, parsed_lower, reso_lower) = parse_time_string('4q1984') - assert date == date_lower - assert parsed == parsed_lower - assert reso == reso_lower - - def test_parse_time_quarter_w_dash(self): - # https://github.com/pandas-dev/pandas/issue/9688 - pairs = [('1988-Q2', '1988Q2'), ('2Q-1988', '2Q1988')] - - for dashed, normal in pairs: - (date_dash, parsed_dash, reso_dash) = parse_time_string(dashed) - (date, parsed, reso) = parse_time_string(normal) - - assert date_dash == date - assert parsed_dash == parsed - assert reso_dash == reso - - pytest.raises(parsing.DateParseError, parse_time_string, "-2Q1992") - pytest.raises(parsing.DateParseError, parse_time_string, "2-Q1992") - pytest.raises(parsing.DateParseError, parse_time_string, "4-4Q1992") - - -class TestDatetimeParsingWrappers(object): - def test_does_not_convert_mixed_integer(self): - bad_date_strings = ('-50000', '999', '123.1234', 'm', 'T') - - for bad_date_string in bad_date_strings: - assert not parsing._does_string_look_like_datetime(bad_date_string) - - good_date_strings = ('2012-01-01', - '01/01/2012', - 'Mon Sep 16, 2013', - '01012012', - '0101', - '1-1') - - for good_date_string in good_date_strings: - assert parsing._does_string_look_like_datetime(good_date_string) - - def test_parsers_quarterly_with_freq(self): - msg = ('Incorrect quarterly string is given, quarter ' - 'must be between 1 and 4: 2013Q5') - with pytest.raises(parsing.DateParseError, match=msg): - parsing.parse_time_string('2013Q5') - - # GH 5418 - msg = ('Unable to retrieve month information from given freq: ' - 'INVLD-L-DEC-SAT') - with pytest.raises(parsing.DateParseError, match=msg): - parsing.parse_time_string('2013Q1', freq='INVLD-L-DEC-SAT') - - cases = {('2013Q2', None): datetime(2013, 4, 1), - ('2013Q2', 'A-APR'): datetime(2012, 8, 1), - ('2013-Q2', 'A-DEC'): datetime(2013, 4, 1)} - - for (date_str, freq), exp in compat.iteritems(cases): - result, _, _ = 
parsing.parse_time_string(date_str, freq=freq) - assert result == exp - - def test_parsers_quarter_invalid(self): - - cases = ['2Q 2005', '2Q-200A', '2Q-200', '22Q2005', '6Q-20', '2Q200.'] - for case in cases: - pytest.raises(ValueError, parsing.parse_time_string, case) - - def test_parsers_monthfreq(self): - cases = {'201101': datetime(2011, 1, 1, 0, 0), - '200005': datetime(2000, 5, 1, 0, 0)} - - for date_str, expected in compat.iteritems(cases): - result1, _, _ = parsing.parse_time_string(date_str, freq='M') - assert result1 == expected - - -class TestGuessDatetimeFormat(object): - - @td.skip_if_not_us_locale - @pytest.mark.parametrize( - "string, format", - [ - ('20111230', '%Y%m%d'), - ('2011-12-30', '%Y-%m-%d'), - ('30-12-2011', '%d-%m-%Y'), - ('2011-12-30 00:00:00', '%Y-%m-%d %H:%M:%S'), - ('2011-12-30T00:00:00', '%Y-%m-%dT%H:%M:%S'), - ('2011-12-30 00:00:00.000000', - '%Y-%m-%d %H:%M:%S.%f')]) - def test_guess_datetime_format_with_parseable_formats( - self, string, format): - result = parsing._guess_datetime_format(string) - assert result == format - - @pytest.mark.parametrize( - "dayfirst, expected", - [ - (True, "%d/%m/%Y"), - (False, "%m/%d/%Y")]) - def test_guess_datetime_format_with_dayfirst(self, dayfirst, expected): - ambiguous_string = '01/01/2011' - result = parsing._guess_datetime_format( - ambiguous_string, dayfirst=dayfirst) - assert result == expected - - @td.skip_if_has_locale - @pytest.mark.parametrize( - "string, format", - [ - ('30/Dec/2011', '%d/%b/%Y'), - ('30/December/2011', '%d/%B/%Y'), - ('30/Dec/2011 00:00:00', '%d/%b/%Y %H:%M:%S')]) - def test_guess_datetime_format_with_locale_specific_formats( - self, string, format): - result = parsing._guess_datetime_format(string) - assert result == format - - def test_guess_datetime_format_invalid_inputs(self): - # A datetime string must include a year, month and a day for it - # to be guessable, in addition to being a string that looks like - # a datetime - invalid_dts = [ - '2013', - 
'01/2013', - '12:00:00', - '1/1/1/1', - 'this_is_not_a_datetime', - '51a', - 9, - datetime(2011, 1, 1), - ] - - for invalid_dt in invalid_dts: - assert parsing._guess_datetime_format(invalid_dt) is None - - @pytest.mark.parametrize( - "string, format", - [ - ('2011-1-1', '%Y-%m-%d'), - ('30-1-2011', '%d-%m-%Y'), - ('1/1/2011', '%m/%d/%Y'), - ('2011-1-1 00:00:00', '%Y-%m-%d %H:%M:%S'), - ('2011-1-1 0:0:0', '%Y-%m-%d %H:%M:%S'), - ('2011-1-3T00:00:0', '%Y-%m-%dT%H:%M:%S')]) - def test_guess_datetime_format_nopadding(self, string, format): - # GH 11142 - result = parsing._guess_datetime_format(string) - assert result == format - - -class TestArrayToDatetime(object): - def test_try_parse_dates(self): - arr = np.array(['5/1/2000', '6/1/2000', '7/1/2000'], dtype=object) - - result = parsing.try_parse_dates(arr, dayfirst=True) - expected = np.array([parse(d, dayfirst=True) for d in arr]) - tm.assert_numpy_array_equal(result, expected) +def test_parse_time_string(): + (date, parsed, reso) = parse_time_string("4Q1984") + (date_lower, parsed_lower, reso_lower) = parse_time_string("4q1984") + + assert date == date_lower + assert reso == reso_lower + assert parsed == parsed_lower + + +@pytest.mark.parametrize("dashed,normal", [ + ("1988-Q2", "1988Q2"), + ("2Q-1988", "2Q1988") +]) +def test_parse_time_quarter_with_dash(dashed, normal): + # see gh-9688 + (date_dash, parsed_dash, reso_dash) = parse_time_string(dashed) + (date, parsed, reso) = parse_time_string(normal) + + assert date_dash == date + assert parsed_dash == parsed + assert reso_dash == reso + + +@pytest.mark.parametrize("dashed", [ + "-2Q1992", "2-Q1992", "4-4Q1992" +]) +def test_parse_time_quarter_with_dash_error(dashed): + msg = ("Unknown datetime string format, " + "unable to parse: {dashed}".format(dashed=dashed)) + + with pytest.raises(parsing.DateParseError, match=msg): + parse_time_string(dashed) + + +@pytest.mark.parametrize("date_string,expected", [ + ("123.1234", False), + ("-50000", False), + ("999", 
False), + ("m", False), + ("T", False), + + ("Mon Sep 16, 2013", True), + ("2012-01-01", True), + ("01/01/2012", True), + ("01012012", True), + ("0101", True), + ("1-1", True) +]) +def test_does_not_convert_mixed_integer(date_string, expected): + assert parsing._does_string_look_like_datetime(date_string) is expected + + +@pytest.mark.parametrize("date_str,kwargs,msg", [ + ("2013Q5", dict(), + ("Incorrect quarterly string is given, " + "quarter must be between 1 and 4: 2013Q5")), + + # see gh-5418 + ("2013Q1", dict(freq="INVLD-L-DEC-SAT"), + ("Unable to retrieve month information " + "from given freq: INVLD-L-DEC-SAT")) +]) +def test_parsers_quarterly_with_freq_error(date_str, kwargs, msg): + with pytest.raises(parsing.DateParseError, match=msg): + parsing.parse_time_string(date_str, **kwargs) + + +@pytest.mark.parametrize("date_str,freq,expected", [ + ("2013Q2", None, datetime(2013, 4, 1)), + ("2013Q2", "A-APR", datetime(2012, 8, 1)), + ("2013-Q2", "A-DEC", datetime(2013, 4, 1)) +]) +def test_parsers_quarterly_with_freq(date_str, freq, expected): + result, _, _ = parsing.parse_time_string(date_str, freq=freq) + assert result == expected + + +@pytest.mark.parametrize("date_str", [ + "2Q 2005", "2Q-200A", "2Q-200", + "22Q2005", "2Q200.", "6Q-20" +]) +def test_parsers_quarter_invalid(date_str): + if date_str == "6Q-20": + msg = ("Incorrect quarterly string is given, quarter " + "must be between 1 and 4: {date_str}".format(date_str=date_str)) + else: + msg = ("Unknown datetime string format, unable " + "to parse: {date_str}".format(date_str=date_str)) + + with pytest.raises(ValueError, match=msg): + parsing.parse_time_string(date_str) + + +@pytest.mark.parametrize("date_str,expected", [ + ("201101", datetime(2011, 1, 1, 0, 0)), + ("200005", datetime(2000, 5, 1, 0, 0)) +]) +def test_parsers_month_freq(date_str, expected): + result, _, _ = parsing.parse_time_string(date_str, freq="M") + assert result == expected + + +@td.skip_if_not_us_locale 
+@pytest.mark.parametrize("string,fmt", [ + ("20111230", "%Y%m%d"), + ("2011-12-30", "%Y-%m-%d"), + ("30-12-2011", "%d-%m-%Y"), + ("2011-12-30 00:00:00", "%Y-%m-%d %H:%M:%S"), + ("2011-12-30T00:00:00", "%Y-%m-%dT%H:%M:%S"), + ("2011-12-30 00:00:00.000000", "%Y-%m-%d %H:%M:%S.%f") +]) +def test_guess_datetime_format_with_parseable_formats(string, fmt): + result = parsing._guess_datetime_format(string) + assert result == fmt + + +@pytest.mark.parametrize("dayfirst,expected", [ + (True, "%d/%m/%Y"), + (False, "%m/%d/%Y") +]) +def test_guess_datetime_format_with_dayfirst(dayfirst, expected): + ambiguous_string = "01/01/2011" + result = parsing._guess_datetime_format(ambiguous_string, + dayfirst=dayfirst) + assert result == expected + + +@td.skip_if_has_locale +@pytest.mark.parametrize("string,fmt", [ + ("30/Dec/2011", "%d/%b/%Y"), + ("30/December/2011", "%d/%B/%Y"), + ("30/Dec/2011 00:00:00", "%d/%b/%Y %H:%M:%S") +]) +def test_guess_datetime_format_with_locale_specific_formats(string, fmt): + result = parsing._guess_datetime_format(string) + assert result == fmt + + +@pytest.mark.parametrize("invalid_dt", [ + "2013", "01/2013", "12:00:00", "1/1/1/1", + "this_is_not_a_datetime", "51a", 9, + datetime(2011, 1, 1) +]) +def test_guess_datetime_format_invalid_inputs(invalid_dt): + # A datetime string must include a year, month and a day for it to be + # guessable, in addition to being a string that looks like a datetime. 
+ assert parsing._guess_datetime_format(invalid_dt) is None + + +@pytest.mark.parametrize("string,fmt", [ + ("2011-1-1", "%Y-%m-%d"), + ("1/1/2011", "%m/%d/%Y"), + ("30-1-2011", "%d-%m-%Y"), + ("2011-1-1 0:0:0", "%Y-%m-%d %H:%M:%S"), + ("2011-1-3T00:00:0", "%Y-%m-%dT%H:%M:%S"), + ("2011-1-1 00:00:00", "%Y-%m-%d %H:%M:%S") +]) +def test_guess_datetime_format_no_padding(string, fmt): + # see gh-11142 + result = parsing._guess_datetime_format(string) + assert result == fmt + + +def test_try_parse_dates(): + arr = np.array(["5/1/2000", "6/1/2000", "7/1/2000"], dtype=object) + result = parsing.try_parse_dates(arr, dayfirst=True) + + expected = np.array([parse(d, dayfirst=True) for d in arr]) + tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/tslibs/test_period_asfreq.py b/pandas/tests/tslibs/test_period_asfreq.py index e5978a59bc2a1..6a9522e705318 100644 --- a/pandas/tests/tslibs/test_period_asfreq.py +++ b/pandas/tests/tslibs/test_period_asfreq.py @@ -1,82 +1,87 @@ # -*- coding: utf-8 -*- +import pytest + from pandas._libs.tslibs.frequencies import get_freq from pandas._libs.tslibs.period import period_asfreq, period_ordinal -class TestPeriodFreqConversion(object): - - def test_intraday_conversion_factors(self): - assert period_asfreq(1, get_freq('D'), get_freq('H'), False) == 24 - assert period_asfreq(1, get_freq('D'), get_freq('T'), False) == 1440 - assert period_asfreq(1, get_freq('D'), get_freq('S'), False) == 86400 - assert period_asfreq(1, get_freq('D'), - get_freq('L'), False) == 86400000 - assert period_asfreq(1, get_freq('D'), - get_freq('U'), False) == 86400000000 - assert period_asfreq(1, get_freq('D'), - get_freq('N'), False) == 86400000000000 - - assert period_asfreq(1, get_freq('H'), get_freq('T'), False) == 60 - assert period_asfreq(1, get_freq('H'), get_freq('S'), False) == 3600 - assert period_asfreq(1, get_freq('H'), - get_freq('L'), False) == 3600000 - assert period_asfreq(1, get_freq('H'), - get_freq('U'), False) == 3600000000 
- assert period_asfreq(1, get_freq('H'), - get_freq('N'), False) == 3600000000000 - - assert period_asfreq(1, get_freq('T'), get_freq('S'), False) == 60 - assert period_asfreq(1, get_freq('T'), get_freq('L'), False) == 60000 - assert period_asfreq(1, get_freq('T'), - get_freq('U'), False) == 60000000 - assert period_asfreq(1, get_freq('T'), - get_freq('N'), False) == 60000000000 - - assert period_asfreq(1, get_freq('S'), get_freq('L'), False) == 1000 - assert period_asfreq(1, get_freq('S'), - get_freq('U'), False) == 1000000 - assert period_asfreq(1, get_freq('S'), - get_freq('N'), False) == 1000000000 - - assert period_asfreq(1, get_freq('L'), get_freq('U'), False) == 1000 - assert period_asfreq(1, get_freq('L'), - get_freq('N'), False) == 1000000 - - assert period_asfreq(1, get_freq('U'), get_freq('N'), False) == 1000 - - def test_period_ordinal_start_values(self): - # information for 1.1.1970 - assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('A')) == 0 - assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('M')) == 0 - assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('W')) == 1 - assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('D')) == 0 - assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('B')) == 0 - - def test_period_ordinal_week(self): - assert period_ordinal(1970, 1, 4, 0, 0, 0, 0, 0, get_freq('W')) == 1 - assert period_ordinal(1970, 1, 5, 0, 0, 0, 0, 0, get_freq('W')) == 2 - assert period_ordinal(2013, 10, 6, 0, - 0, 0, 0, 0, get_freq('W')) == 2284 - assert period_ordinal(2013, 10, 7, 0, - 0, 0, 0, 0, get_freq('W')) == 2285 - - def test_period_ordinal_business_day(self): - # Thursday - assert period_ordinal(2013, 10, 3, 0, - 0, 0, 0, 0, get_freq('B')) == 11415 - # Friday - assert period_ordinal(2013, 10, 4, 0, - 0, 0, 0, 0, get_freq('B')) == 11416 - # Saturday - assert period_ordinal(2013, 10, 5, 0, - 0, 0, 0, 0, get_freq('B')) == 11417 - # Sunday - assert period_ordinal(2013, 10, 6, 0, - 0, 0, 0, 0, get_freq('B')) 
== 11417 - # Monday - assert period_ordinal(2013, 10, 7, 0, - 0, 0, 0, 0, get_freq('B')) == 11417 - # Tuesday - assert period_ordinal(2013, 10, 8, 0, - 0, 0, 0, 0, get_freq('B')) == 11418 +@pytest.mark.parametrize("freq1,freq2,expected", [ + ("D", "H", 24), + ("D", "T", 1440), + ("D", "S", 86400), + ("D", "L", 86400000), + ("D", "U", 86400000000), + ("D", "N", 86400000000000), + + ("H", "T", 60), + ("H", "S", 3600), + ("H", "L", 3600000), + ("H", "U", 3600000000), + ("H", "N", 3600000000000), + + ("T", "S", 60), + ("T", "L", 60000), + ("T", "U", 60000000), + ("T", "N", 60000000000), + + ("S", "L", 1000), + ("S", "U", 1000000), + ("S", "N", 1000000000), + + ("L", "U", 1000), + ("L", "N", 1000000), + + ("U", "N", 1000) +]) +def test_intra_day_conversion_factors(freq1, freq2, expected): + assert period_asfreq(1, get_freq(freq1), + get_freq(freq2), False) == expected + + +@pytest.mark.parametrize("freq,expected", [ + ("A", 0), + ("M", 0), + ("W", 1), + ("D", 0), + ("B", 0) +]) +def test_period_ordinal_start_values(freq, expected): + # information for Jan. 1, 1970. + assert period_ordinal(1970, 1, 1, 0, 0, 0, + 0, 0, get_freq(freq)) == expected + + +@pytest.mark.parametrize("dt,expected", [ + ((1970, 1, 4, 0, 0, 0, 0, 0), 1), + ((1970, 1, 5, 0, 0, 0, 0, 0), 2), + ((2013, 10, 6, 0, 0, 0, 0, 0), 2284), + ((2013, 10, 7, 0, 0, 0, 0, 0), 2285) +]) +def test_period_ordinal_week(dt, expected): + args = dt + (get_freq("W"),) + assert period_ordinal(*args) == expected + + +@pytest.mark.parametrize("day,expected", [ + # Thursday (Oct. 3, 2013). + (3, 11415), + + # Friday (Oct. 4, 2013). + (4, 11416), + + # Saturday (Oct. 5, 2013). + (5, 11417), + + # Sunday (Oct. 6, 2013). + (6, 11417), + + # Monday (Oct. 7, 2013). + (7, 11417), + + # Tuesday (Oct. 8, 2013). 
+ (8, 11418) +]) +def test_period_ordinal_business_day(day, expected): + args = (2013, 10, day, 0, 0, 0, 0, 0, get_freq("B")) + assert period_ordinal(*args) == expected diff --git a/pandas/tests/tslibs/test_timedeltas.py b/pandas/tests/tslibs/test_timedeltas.py index 50e64bb7c2082..fdc8eff80acad 100644 --- a/pandas/tests/tslibs/test_timedeltas.py +++ b/pandas/tests/tslibs/test_timedeltas.py @@ -5,37 +5,25 @@ from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds import pandas as pd - - -def test_delta_to_nanoseconds(): - obj = np.timedelta64(14, 'D') - result = delta_to_nanoseconds(obj) - assert result == 14 * 24 * 3600 * 1e9 - - obj = pd.Timedelta(minutes=-7) - result = delta_to_nanoseconds(obj) - assert result == -7 * 60 * 1e9 - - obj = pd.Timedelta(minutes=-7).to_pytimedelta() +from pandas import Timedelta + + +@pytest.mark.parametrize("obj,expected", [ + (np.timedelta64(14, "D"), 14 * 24 * 3600 * 1e9), + (Timedelta(minutes=-7), -7 * 60 * 1e9), + (Timedelta(minutes=-7).to_pytimedelta(), -7 * 60 * 1e9), + (pd.offsets.Nano(125), 125), + (1, 1), + (np.int64(2), 2), + (np.int32(3), 3) +]) +def test_delta_to_nanoseconds(obj, expected): result = delta_to_nanoseconds(obj) - assert result == -7 * 60 * 1e9 + assert result == expected - obj = pd.offsets.Nano(125) - result = delta_to_nanoseconds(obj) - assert result == 125 - - obj = 1 - result = delta_to_nanoseconds(obj) - assert obj == 1 - obj = np.int64(2) - result = delta_to_nanoseconds(obj) - assert obj == 2 - - obj = np.int32(3) - result = delta_to_nanoseconds(obj) - assert result == 3 +def test_delta_to_nanoseconds_error(): + obj = np.array([123456789], dtype="m8[ns]") - obj = np.array([123456789], dtype='m8[ns]') - with pytest.raises(TypeError): + with pytest.raises(TypeError, match="<(class|type) 'numpy.ndarray'>"): delta_to_nanoseconds(obj) diff --git a/pandas/tests/tslibs/test_timezones.py b/pandas/tests/tslibs/test_timezones.py index 68a6c1b09b992..0255865dbdf71 100644 --- 
a/pandas/tests/tslibs/test_timezones.py +++ b/pandas/tests/tslibs/test_timezones.py @@ -10,39 +10,51 @@ from pandas import Timestamp -@pytest.mark.parametrize('tz_name', list(pytz.common_timezones)) +@pytest.mark.parametrize("tz_name", list(pytz.common_timezones)) def test_cache_keys_are_distinct_for_pytz_vs_dateutil(tz_name): - if tz_name == 'UTC': - # skip utc as it's a special case in dateutil - return + if tz_name == "UTC": + pytest.skip("UTC: special case in dateutil") + tz_p = timezones.maybe_get_tz(tz_name) - tz_d = timezones.maybe_get_tz('dateutil/' + tz_name) + tz_d = timezones.maybe_get_tz("dateutil/" + tz_name) + if tz_d is None: - # skip timezones that dateutil doesn't know about. - return + pytest.skip(tz_name + ": dateutil does not know about this one") + assert timezones._p_tz_cache_key(tz_p) != timezones._p_tz_cache_key(tz_d) -def test_tzlocal(): - # GH#13583 - ts = Timestamp('2011-01-01', tz=dateutil.tz.tzlocal()) +def test_tzlocal_repr(): + # see gh-13583 + ts = Timestamp("2011-01-01", tz=dateutil.tz.tzlocal()) assert ts.tz == dateutil.tz.tzlocal() assert "tz='tzlocal()')" in repr(ts) + +def test_tzlocal_maybe_get_tz(): + # see gh-13583 tz = timezones.maybe_get_tz('tzlocal()') assert tz == dateutil.tz.tzlocal() - # get offset using normal datetime for test + +def test_tzlocal_offset(): + # see gh-13583 + # + # Get offset using normal datetime for test. 
+ ts = Timestamp("2011-01-01", tz=dateutil.tz.tzlocal()) + offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1)) offset = offset.total_seconds() * 1000000000 - assert ts.value + offset == Timestamp('2011-01-01').value + assert ts.value + offset == Timestamp("2011-01-01").value -@pytest.mark.parametrize('eastern, localize', [ - (pytz.timezone('US/Eastern'), lambda tz, x: tz.localize(x)), - (dateutil.tz.gettz('US/Eastern'), lambda tz, x: x.replace(tzinfo=tz))]) -def test_infer_tz(eastern, localize): - utc = pytz.utc + +@pytest.fixture(params=[ + (pytz.timezone("US/Eastern"), lambda tz, x: tz.localize(x)), + (dateutil.tz.gettz("US/Eastern"), lambda tz, x: x.replace(tzinfo=tz)) +]) +def infer_setup(request): + eastern, localize = request.param start_naive = datetime(2001, 1, 1) end_naive = datetime(2009, 1, 1) @@ -50,6 +62,12 @@ def test_infer_tz(eastern, localize): start = localize(eastern, start_naive) end = localize(eastern, end_naive) + return eastern, localize, start, end, start_naive, end_naive + + +def test_infer_tz_compat(infer_setup): + eastern, _, start, end, start_naive, end_naive = infer_setup + assert (timezones.infer_tzinfo(start, end) is conversion.localize_pydatetime(start_naive, eastern).tzinfo) assert (timezones.infer_tzinfo(start, None) is @@ -57,12 +75,27 @@ def test_infer_tz(eastern, localize): assert (timezones.infer_tzinfo(None, end) is conversion.localize_pydatetime(end_naive, eastern).tzinfo) + +def test_infer_tz_utc_localize(infer_setup): + _, _, start, end, start_naive, end_naive = infer_setup + utc = pytz.utc + start = utc.localize(start_naive) end = utc.localize(end_naive) + assert timezones.infer_tzinfo(start, end) is utc + +@pytest.mark.parametrize("ordered", [True, False]) +def test_infer_tz_mismatch(infer_setup, ordered): + eastern, _, _, _, start_naive, end_naive = infer_setup + msg = "Inputs must both have the same timezone" + + utc = pytz.utc + start = utc.localize(start_naive) end = conversion.localize_pydatetime(end_naive, 
eastern) - with pytest.raises(Exception): - timezones.infer_tzinfo(start, end) - with pytest.raises(Exception): - timezones.infer_tzinfo(end, start) + + args = (start, end) if ordered else (end, start) + + with pytest.raises(AssertionError, match=msg): + timezones.infer_tzinfo(*args) diff --git a/pandas/tests/tslibs/test_tslib.py b/pandas/tests/tslibs/test_tslib.py deleted file mode 100644 index 17bd46cd235da..0000000000000 --- a/pandas/tests/tslibs/test_tslib.py +++ /dev/null @@ -1,23 +0,0 @@ -# -*- coding: utf-8 -*- -"""Tests for functions from pandas._libs.tslibs""" - -from datetime import date, datetime - -from pandas._libs import tslibs - - -def test_normalize_date(): - value = date(2012, 9, 7) - - result = tslibs.normalize_date(value) - assert (result == datetime(2012, 9, 7)) - - value = datetime(2012, 9, 7, 12) - - result = tslibs.normalize_date(value) - assert (result == datetime(2012, 9, 7)) - - value = datetime(2007, 10, 1, 1, 12, 5, 10) - - actual = tslibs.normalize_date(value) - assert actual == datetime(2007, 10, 1)
I was planning to correct individual files, but then I began to realize that the fixes were involving more and more files (was shifting tests around) in the directory, to the point that I just modified them all. 🙂
https://api.github.com/repos/pandas-dev/pandas/pulls/24587
2019-01-03T08:47:13Z
2019-01-03T21:14:30Z
2019-01-03T21:14:30Z
2019-01-03T21:46:49Z
DOC: Correct description of day_opt in shift_month
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 11ce539d25767..0ca9410df89c0 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -847,11 +847,15 @@ def shift_month(stamp: datetime, months: int, ---------- stamp : datetime or Timestamp months : int - day_opt : None, 'start', 'end', or an integer + day_opt : None, 'start', 'end', 'business_start', 'business_end', or int None: returned datetimelike has the same day as the input, or the last day of the month if the new month is too short 'start': returned datetimelike has day=1 'end': returned datetimelike has day on the last day of the month + 'business_start': returned datetimelike has day on the first + business day of the month + 'business_end': returned datetimelike has day on the last + business day of the month int: returned datetimelike has day equal to day_opt Returns
Doc is updated per implementation at [268150f](https://github.com/pandas-dev/pandas/blob/268150f/pandas/_libs/tslibs/offsets.pyx#L837-L891).
https://api.github.com/repos/pandas-dev/pandas/pulls/24585
2019-01-03T07:33:59Z
2019-01-03T08:43:43Z
2019-01-03T08:43:43Z
2019-01-03T20:30:46Z
Fix docstring templates not being filled (#24535)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index a7f2d4fad38de..c853a30c0de79 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1920,7 +1920,7 @@ def notna(self): Returns ------- - filled : %(klass)s + filled : Index """ @Appender(_index_shared_docs['fillna'])
- [x] closes #24535 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Fixes the docstring template issue
https://api.github.com/repos/pandas-dev/pandas/pulls/24584
2019-01-03T07:28:22Z
2019-01-03T12:18:45Z
2019-01-03T12:18:45Z
2019-01-04T06:57:12Z
REF: put DatetimeBlock adjacent to DatetimeLikeBlockMixin
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 5ce5ae7186774..d12114bd951ba 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2206,48 +2206,71 @@ def asi8(self): return self.values.view('i8') -class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): +class DatetimeBlock(DatetimeLikeBlockMixin, Block): __slots__ = () - is_timedelta = True + is_datetime = True _can_hold_na = True - is_numeric = False def __init__(self, values, placement, ndim=None): - if values.dtype != _TD_DTYPE: - values = conversion.ensure_timedelta64ns(values) - if isinstance(values, TimedeltaArray): + values = self._maybe_coerce_values(values) + super(DatetimeBlock, self).__init__(values, + placement=placement, ndim=ndim) + + def _maybe_coerce_values(self, values): + """Input validation for values passed to __init__. Ensure that + we have datetime64ns, coercing if necessary. + + Parameters + ---------- + values : array-like + Must be convertible to datetime64 + + Returns + ------- + values : ndarray[datetime64ns] + + Overridden by DatetimeTZBlock. 
+ """ + if values.dtype != _NS_DTYPE: + values = conversion.ensure_datetime64ns(values) + + if isinstance(values, DatetimeArray): values = values._data + assert isinstance(values, np.ndarray), type(values) - super(TimeDeltaBlock, self).__init__(values, - placement=placement, ndim=ndim) + return values - @property - def _holder(self): - return TimedeltaArray + def _astype(self, dtype, **kwargs): + """ + these automatically copy, so copy=True has no effect + raise on an except if raise == True + """ + dtype = pandas_dtype(dtype) - @property - def _box_func(self): - return lambda x: Timedelta(x, unit='ns') + # if we are passed a datetime64[ns, tz] + if is_datetime64tz_dtype(dtype): + values = self.values + if getattr(values, 'tz', None) is None: + values = DatetimeIndex(values).tz_localize('UTC') + values = values.tz_convert(dtype.tz) + return self.make_block(values) + + # delegate + return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs) def _can_hold_element(self, element): tipo = maybe_infer_dtype_type(element) if tipo is not None: - return issubclass(tipo.type, (np.timedelta64, np.int64)) - return is_integer(element) or isinstance( - element, (timedelta, np.timedelta64, np.int64)) - - def fillna(self, value, **kwargs): - - # allow filling with integers to be - # interpreted as seconds - if is_integer(value) and not isinstance(value, np.timedelta64): - value = Timedelta(value, unit='s') - return super(TimeDeltaBlock, self).fillna(value, **kwargs) + return tipo == _NS_DTYPE or tipo == np.int64 + return (is_integer(element) or isinstance(element, datetime) or + isna(element)) def _try_coerce_args(self, values, other): """ - Coerce values and other to int64, with null values converted to - iNaT. values is always ndarray-like, other may not be + Coerce values and other to dtype 'i8'. NaN and NaT convert to + the smallest i8, and will correctly round-trip to NaT if converted + back in _try_coerce_result. 
values is always ndarray-like, other + may not be Parameters ---------- @@ -2258,19 +2281,20 @@ def _try_coerce_args(self, values, other): ------- base-type values, base-type other """ + values = values.view('i8') if isinstance(other, bool): raise TypeError elif is_null_datelike_scalar(other): other = tslibs.iNaT - elif isinstance(other, Timedelta): - other = other.value - elif isinstance(other, timedelta): - other = Timedelta(other).value - elif isinstance(other, np.timedelta64): - other = Timedelta(other).value - elif hasattr(other, 'dtype') and is_timedelta64_dtype(other): + elif isinstance(other, (datetime, np.datetime64, date)): + other = self._box_func(other) + if getattr(other, 'tz') is not None: + raise TypeError("cannot coerce a Timestamp with a tz on a " + "naive Block") + other = other.asm8.view('i8') + elif hasattr(other, 'dtype') and is_datetime64_dtype(other): other = other.astype('i8', copy=False).view('i8') else: # coercion issues @@ -2280,549 +2304,345 @@ def _try_coerce_args(self, values, other): return values, other def _try_coerce_result(self, result): - """ reverse of try_coerce_args / try_operate """ + """ reverse of try_coerce_args """ if isinstance(result, np.ndarray): - mask = isna(result) if result.dtype.kind in ['i', 'f', 'O']: - result = result.astype('m8[ns]') - result[mask] = tslibs.iNaT - elif isinstance(result, (np.integer, np.float)): + try: + result = result.astype('M8[ns]') + except ValueError: + pass + elif isinstance(result, (np.integer, np.float, np.datetime64)): result = self._box_func(result) return result - def should_store(self, value): - return (issubclass(value.dtype.type, np.timedelta64) and - not is_extension_array_dtype(value)) + @property + def _box_func(self): + return tslibs.Timestamp - def to_native_types(self, slicer=None, na_rep=None, quoting=None, - **kwargs): + def to_native_types(self, slicer=None, na_rep=None, date_format=None, + quoting=None, **kwargs): """ convert to our native types format, slicing if 
desired """ values = self.values - if slicer is not None: - values = values[:, slicer] - mask = isna(values) + i8values = self.values.view('i8') - rvalues = np.empty(values.shape, dtype=object) - if na_rep is None: - na_rep = 'NaT' - rvalues[mask] = na_rep - imask = (~mask).ravel() + if slicer is not None: + i8values = i8values[..., slicer] - # FIXME: - # should use the formats.format.Timedelta64Formatter here - # to figure what format to pass to the Timedelta - # e.g. to not show the decimals say - rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all') - for val in values.ravel()[imask]], - dtype=object) - return rvalues + from pandas.io.formats.format import _get_format_datetime64_from_values + format = _get_format_datetime64_from_values(values, date_format) - def external_values(self, dtype=None): - return np.asarray(self.values.astype("timedelta64[ns]", copy=False)) + result = tslib.format_array_from_datetime( + i8values.ravel(), tz=getattr(self.values, 'tz', None), + format=format, na_rep=na_rep).reshape(i8values.shape) + return np.atleast_2d(result) + def should_store(self, value): + return (issubclass(value.dtype.type, np.datetime64) and + not is_datetime64tz_dtype(value) and + not is_extension_array_dtype(value)) -class BoolBlock(NumericBlock): - __slots__ = () - is_bool = True - _can_hold_na = False + def set(self, locs, values): + """ + Modify Block in-place with new item value - def _can_hold_element(self, element): - tipo = maybe_infer_dtype_type(element) - if tipo is not None: - return issubclass(tipo.type, np.bool_) - return isinstance(element, (bool, np.bool_)) + Returns + ------- + None + """ + values = conversion.ensure_datetime64ns(values, copy=False) - def should_store(self, value): - return (issubclass(value.dtype.type, np.bool_) and not - is_extension_array_dtype(value)) + self.values[locs] = values - def replace(self, to_replace, value, inplace=False, filter=None, - regex=False, convert=True): - inplace = 
validate_bool_kwarg(inplace, 'inplace') - to_replace_values = np.atleast_1d(to_replace) - if not np.can_cast(to_replace_values, bool): - return self - return super(BoolBlock, self).replace(to_replace, value, - inplace=inplace, filter=filter, - regex=regex, convert=convert) + def external_values(self): + return np.asarray(self.values.astype('datetime64[ns]', copy=False)) -class ObjectBlock(Block): +class DatetimeTZBlock(ExtensionBlock, DatetimeBlock): + """ implement a datetime64 block with a tz attribute """ __slots__ = () - is_object = True - _can_hold_na = True - - def __init__(self, values, placement=None, ndim=2): - if issubclass(values.dtype.type, compat.string_types): - values = np.array(values, dtype=object) + is_datetimetz = True + is_extension = True - super(ObjectBlock, self).__init__(values, ndim=ndim, - placement=placement) + def __init__(self, values, placement, ndim=2, dtype=None): + # XXX: This will end up calling _maybe_coerce_values twice + # when dtype is not None. It's relatively cheap (just an isinstance) + # but it'd nice to avoid. + # + # If we can remove dtype from __init__, and push that conversion + # push onto the callers, then we can remove this entire __init__ + # and just use DatetimeBlock's. + if dtype is not None: + values = self._maybe_coerce_values(values, dtype=dtype) + super(DatetimeTZBlock, self).__init__(values, placement=placement, + ndim=ndim) @property - def is_bool(self): - """ we can be a bool if we have only bool values but are of type - object - """ - return lib.is_bool_array(self.values.ravel()) + def _holder(self): + return DatetimeArray - # TODO: Refactor when convert_objects is removed since there will be 1 path - def convert(self, *args, **kwargs): - """ attempt to coerce any object types to better types return a copy of - the block (if copy = True) by definition we ARE an ObjectBlock!!!!! + def _maybe_coerce_values(self, values, dtype=None): + """Input validation for values passed to __init__. 
Ensure that + we have datetime64TZ, coercing if necessary. - can return multiple blocks! + Parametetrs + ----------- + values : array-like + Must be convertible to datetime64 + dtype : string or DatetimeTZDtype, optional + Does a shallow copy to this tz + + Returns + ------- + values : ndarray[datetime64ns] """ + if not isinstance(values, self._holder): + values = self._holder(values) - if args: - raise NotImplementedError - by_item = True if 'by_item' not in kwargs else kwargs['by_item'] + if dtype is not None: + if isinstance(dtype, compat.string_types): + dtype = DatetimeTZDtype.construct_from_string(dtype) + values = type(values)(values, dtype=dtype) - new_inputs = ['coerce', 'datetime', 'numeric', 'timedelta'] - new_style = False - for kw in new_inputs: - new_style |= kw in kwargs + if values.tz is None: + raise ValueError("cannot create a DatetimeTZBlock without a tz") - if new_style: - fn = soft_convert_objects - fn_inputs = new_inputs - else: - fn = maybe_convert_objects - fn_inputs = ['convert_dates', 'convert_numeric', - 'convert_timedeltas'] - fn_inputs += ['copy'] + return values - fn_kwargs = {key: kwargs[key] for key in fn_inputs if key in kwargs} + @property + def is_view(self): + """ return a boolean if I am possibly a view """ + # check the ndarray values of the DatetimeIndex values + return self.values._data.base is not None - # operate column-by-column - def f(m, v, i): - shape = v.shape - values = fn(v.ravel(), **fn_kwargs) - try: - values = values.reshape(shape) - values = _block_shape(values, ndim=self.ndim) - except (AttributeError, NotImplementedError): - pass - - return values - - if by_item and not self._is_single_block: - blocks = self.split_and_operate(None, f, False) - else: - values = f(None, self.values.ravel(), None) - blocks = [make_block(values, ndim=self.ndim, - placement=self.mgr_locs)] - - return blocks - - def set(self, locs, values): - """ - Modify Block in-place with new item value - - Returns - ------- - None - """ - try: - 
self.values[locs] = values - except (ValueError): - - # broadcasting error - # see GH6171 - new_shape = list(values.shape) - new_shape[0] = len(self.items) - self.values = np.empty(tuple(new_shape), dtype=self.dtype) - self.values.fill(np.nan) - self.values[locs] = values - - def _maybe_downcast(self, blocks, downcast=None): - - if downcast is not None: - return blocks - - # split and convert the blocks - return _extend_blocks([b.convert(datetime=True, numeric=False) - for b in blocks]) - - def _can_hold_element(self, element): - return True - - def _try_coerce_args(self, values, other): - """ provide coercion to our input arguments """ - - if isinstance(other, ABCDatetimeIndex): - # May get a DatetimeIndex here. Unbox it. - other = other.array - - if isinstance(other, DatetimeArray): - # hit in pandas/tests/indexing/test_coercion.py - # ::TestWhereCoercion::test_where_series_datetime64[datetime64tz] - # when falling back to ObjectBlock.where - other = other.astype(object) - - return values, other - - def should_store(self, value): - return not (issubclass(value.dtype.type, - (np.integer, np.floating, np.complexfloating, - np.datetime64, np.bool_)) or - # TODO(ExtensionArray): remove is_extension_type - # when all extension arrays have been ported. 
- is_extension_type(value) or - is_extension_array_dtype(value)) - - def replace(self, to_replace, value, inplace=False, filter=None, - regex=False, convert=True): - to_rep_is_list = is_list_like(to_replace) - value_is_list = is_list_like(value) - both_lists = to_rep_is_list and value_is_list - either_list = to_rep_is_list or value_is_list - - result_blocks = [] - blocks = [self] - - if not either_list and is_re(to_replace): - return self._replace_single(to_replace, value, inplace=inplace, - filter=filter, regex=True, - convert=convert) - elif not (either_list or regex): - return super(ObjectBlock, self).replace(to_replace, value, - inplace=inplace, - filter=filter, regex=regex, - convert=convert) - elif both_lists: - for to_rep, v in zip(to_replace, value): - result_blocks = [] - for b in blocks: - result = b._replace_single(to_rep, v, inplace=inplace, - filter=filter, regex=regex, - convert=convert) - result_blocks = _extend_blocks(result, result_blocks) - blocks = result_blocks - return result_blocks - - elif to_rep_is_list and regex: - for to_rep in to_replace: - result_blocks = [] - for b in blocks: - result = b._replace_single(to_rep, value, inplace=inplace, - filter=filter, regex=regex, - convert=convert) - result_blocks = _extend_blocks(result, result_blocks) - blocks = result_blocks - return result_blocks - - return self._replace_single(to_replace, value, inplace=inplace, - filter=filter, convert=convert, - regex=regex) + def copy(self, deep=True): + """ copy constructor """ + values = self.values + if deep: + values = values.copy(deep=True) + return self.make_block_same_class(values) - def _replace_single(self, to_replace, value, inplace=False, filter=None, - regex=False, convert=True, mask=None): + def get_values(self, dtype=None): """ - Replace elements by the given value. + Returns an ndarray of values. Parameters ---------- - to_replace : object or pattern - Scalar to replace or regular expression to match. - value : object - Replacement object. 
- inplace : bool, default False - Perform inplace modification. - filter : list, optional - regex : bool, default False - If true, perform regular expression substitution. - convert : bool, default True - If true, try to coerce any object types to better types. - mask : array-like of bool, optional - True indicate corresponding element is ignored. + dtype : np.dtype + Only `object`-like dtypes are respected here (not sure + why). Returns ------- - a new block, the result after replacing - """ - inplace = validate_bool_kwarg(inplace, 'inplace') - - # to_replace is regex compilable - to_rep_re = regex and is_re_compilable(to_replace) - - # regex is regex compilable - regex_re = is_re_compilable(regex) - - # only one will survive - if to_rep_re and regex_re: - raise AssertionError('only one of to_replace and regex can be ' - 'regex compilable') - - # if regex was passed as something that can be a regex (rather than a - # boolean) - if regex_re: - to_replace = regex - - regex = regex_re or to_rep_re - - # try to get the pattern attribute (compiled re) or it's a string - try: - pattern = to_replace.pattern - except AttributeError: - pattern = to_replace - - # if the pattern is not empty and to_replace is either a string or a - # regex - if regex and pattern: - rx = re.compile(to_replace) - else: - # if the thing to replace is not a string or compiled regex call - # the superclass method -> to_replace is some kind of object - return super(ObjectBlock, self).replace(to_replace, value, - inplace=inplace, - filter=filter, regex=regex) - - new_values = self.values if inplace else self.values.copy() - - # deal with replacing values with objects (strings) that match but - # whose replacement is not a string (numeric, nan, object) - if isna(value) or not isinstance(value, compat.string_types): - - def re_replacer(s): - try: - return value if rx.search(s) is not None else s - except TypeError: - return s - else: - # value is guaranteed to be a string here, s can be either a 
string - # or null if it's null it gets returned - def re_replacer(s): - try: - return rx.sub(value, s) - except TypeError: - return s + values : ndarray + When ``dtype=object``, then and object-dtype ndarray of + boxed values is returned. Otherwise, an M8[ns] ndarray + is returned. - f = np.vectorize(re_replacer, otypes=[self.dtype]) + DatetimeArray is always 1-d. ``get_values`` will reshape + the return value to be the same dimensionality as the + block. + """ + values = self.values + if is_object_dtype(dtype): + values = values._box_values(values._data) - if filter is None: - filt = slice(None) - else: - filt = self.mgr_locs.isin(filter).nonzero()[0] + values = np.asarray(values) - if mask is None: - new_values[filt] = f(new_values[filt]) - else: - new_values[filt][mask] = f(new_values[filt][mask]) + if self.ndim == 2: + # Ensure that our shape is correct for DataFrame. + # ExtensionArrays are always 1-D, even in a DataFrame when + # the analogous NumPy-backed column would be a 2-D ndarray. + values = values.reshape(1, -1) + return values - # convert - block = self.make_block(new_values) - if convert: - block = block.convert(by_item=True, numeric=False) - return block + def _slice(self, slicer): + """ return a slice of my values """ + if isinstance(slicer, tuple): + col, loc = slicer + if not com.is_null_slice(col) and col != 0: + raise IndexError("{0} only contains one item".format(self)) + return self.values[loc] + return self.values[slicer] - def _replace_coerce(self, to_replace, value, inplace=True, regex=False, - convert=False, mask=None): + def _try_coerce_args(self, values, other): """ - Replace value corresponding to the given boolean array with another - value. + localize and return i8 for the values Parameters ---------- - to_replace : object or pattern - Scalar to replace or regular expression to match. - value : object - Replacement object. - inplace : bool, default False - Perform inplace modification. 
- regex : bool, default False - If true, perform regular expression substitution. - convert : bool, default True - If true, try to coerce any object types to better types. - mask : array-like of bool, optional - True indicate corresponding element is ignored. + values : ndarray-like + other : ndarray-like or scalar Returns ------- - A new block if there is anything to replace or the original block. + base-type values, base-type other """ - if mask.any(): - block = super(ObjectBlock, self)._replace_coerce( - to_replace=to_replace, value=value, inplace=inplace, - regex=regex, convert=convert, mask=mask) - if convert: - block = [b.convert(by_item=True, numeric=False, copy=True) - for b in block] - return block - return self - + # asi8 is a view, needs copy + values = _block_shape(values.view("i8"), ndim=self.ndim) -class CategoricalBlock(ExtensionBlock): - __slots__ = () - is_categorical = True - _verify_integrity = True - _can_hold_na = True - _concatenator = staticmethod(_concat._concat_categorical) + if isinstance(other, ABCSeries): + other = self._holder(other) - def __init__(self, values, placement, ndim=None): - from pandas.core.arrays.categorical import _maybe_to_categorical + if isinstance(other, bool): + raise TypeError + elif is_datetime64_dtype(other): + # add the tz back + other = self._holder(other, dtype=self.dtype) - # coerce to categorical if we can - super(CategoricalBlock, self).__init__(_maybe_to_categorical(values), - placement=placement, - ndim=ndim) + elif (is_null_datelike_scalar(other) or + (lib.is_scalar(other) and isna(other))): + other = tslibs.iNaT + elif isinstance(other, self._holder): + if other.tz != self.values.tz: + raise ValueError("incompatible or non tz-aware value") + other = _block_shape(other.asi8, ndim=self.ndim) + elif isinstance(other, (np.datetime64, datetime, date)): + other = tslibs.Timestamp(other) + tz = getattr(other, 'tz', None) - @property - def _holder(self): - return Categorical + # test we can have an equal time 
zone + if tz is None or str(tz) != str(self.values.tz): + raise ValueError("incompatible or non tz-aware value") + other = other.value + else: + raise TypeError - @property - def array_dtype(self): - """ the dtype to return if I want to construct this block as an - array - """ - return np.object_ + return values, other def _try_coerce_result(self, result): """ reverse of try_coerce_args """ + if isinstance(result, np.ndarray): + if result.dtype.kind in ['i', 'f', 'O']: + result = result.astype('M8[ns]') + elif isinstance(result, (np.integer, np.float, np.datetime64)): + result = self._box_func(result) + if isinstance(result, np.ndarray): + # allow passing of > 1dim if its trivial - # GH12564: CategoricalBlock is 1-dim only - # while returned results could be any dim - if ((not is_categorical_dtype(result)) and - isinstance(result, np.ndarray)): - result = _block_shape(result, ndim=self.ndim) + if result.ndim > 1: + result = result.reshape(np.prod(result.shape)) + # GH#24096 new values invalidates a frequency + result = self._holder._simple_new(result, freq=None, + tz=self.values.tz) return result - def to_dense(self): - # Categorical.get_values returns a DatetimeIndex for datetime - # categories, so we can't simply use `np.asarray(self.values)` like - # other types. - return self.values.get_values() + @property + def _box_func(self): + return lambda x: tslibs.Timestamp(x, tz=self.dtype.tz) - def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs): - """ convert to our native types format, slicing if desired """ + def diff(self, n, axis=0): + """1st discrete difference - values = self.values - if slicer is not None: - # Categorical is always one dimension - values = values[slicer] - mask = isna(values) - values = np.array(values, dtype='object') - values[mask] = na_rep + Parameters + ---------- + n : int, number of periods to diff + axis : int, axis to diff upon. 
default 0 - # we are expected to return a 2-d ndarray - return values.reshape(1, len(values)) + Return + ------ + A list with a new TimeDeltaBlock. - def concat_same_type(self, to_concat, placement=None): + Note + ---- + The arguments here are mimicking shift so they are called correctly + by apply. """ - Concatenate list of single blocks of the same type. + if axis == 0: + # Cannot currently calculate diff across multiple blocks since this + # function is invoked via apply + raise NotImplementedError + new_values = (self.values - self.shift(n, axis=axis)[0].values).asi8 - Note that this CategoricalBlock._concat_same_type *may* not - return a CategoricalBlock. When the categories in `to_concat` - differ, this will return an object ndarray. + # Reshape the new_values like how algos.diff does for timedelta data + new_values = new_values.reshape(1, len(new_values)) + new_values = new_values.astype('timedelta64[ns]') + return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)] - If / when we decide we don't like that behavior: + def concat_same_type(self, to_concat, placement=None): + # need to handle concat([tz1, tz2]) here, since DatetimeArray + # only handles cases where all the tzs are the same. + # Instead of placing the condition here, it could also go into the + # is_uniform_join_units check, but I'm not sure what is better. + if len({x.dtype for x in to_concat}) > 1: + values = _concat._concat_datetime([x.values for x in to_concat]) + placement = placement or slice(0, len(values), 1) - 1. Change Categorical._concat_same_type to use union_categoricals - 2. Delete this method. 
- """ - values = self._concatenator([blk.values for blk in to_concat], - axis=self.ndim - 1) - # not using self.make_block_same_class as values can be object dtype - return make_block( - values, placement=placement or slice(0, len(values), 1), - ndim=self.ndim) + if self.ndim > 1: + values = np.atleast_2d(values) + return ObjectBlock(values, ndim=self.ndim, placement=placement) + return super(DatetimeTZBlock, self).concat_same_type(to_concat, + placement) - def where(self, other, cond, align=True, errors='raise', - try_cast=False, axis=0, transpose=False): - # TODO(CategoricalBlock.where): - # This can all be deleted in favor of ExtensionBlock.where once - # we enforce the deprecation. - object_msg = ( - "Implicitly converting categorical to object-dtype ndarray. " - "One or more of the values in 'other' are not present in this " - "categorical's categories. A future version of pandas will raise " - "a ValueError when 'other' contains different categories.\n\n" - "To preserve the current behavior, add the new categories to " - "the categorical before calling 'where', or convert the " - "categorical to a different dtype." - ) + def fillna(self, value, limit=None, inplace=False, downcast=None): + # We support filling a DatetimeTZ with a `value` whose timezone + # is different by coercing to object. try: - # Attempt to do preserve categorical dtype. 
- result = super(CategoricalBlock, self).where( - other, cond, align, errors, try_cast, axis, transpose + return super(DatetimeTZBlock, self).fillna( + value, limit, inplace, downcast + ) + except (ValueError, TypeError): + # different timezones, or a non-tz + return self.astype(object).fillna( + value, limit=limit, inplace=inplace, downcast=downcast ) - except (TypeError, ValueError): - warnings.warn(object_msg, FutureWarning, stacklevel=6) - result = self.astype(object).where(other, cond, align=align, - errors=errors, - try_cast=try_cast, - axis=axis, transpose=transpose) - return result + def setitem(self, indexer, value): + # https://github.com/pandas-dev/pandas/issues/24020 + # Need a dedicated setitem until #24020 (type promotion in setitem + # for extension arrays) is designed and implemented. + try: + return super(DatetimeTZBlock, self).setitem(indexer, value) + except (ValueError, TypeError): + newb = make_block(self.values.astype(object), + placement=self.mgr_locs, + klass=ObjectBlock,) + return newb.setitem(indexer, value) -class DatetimeBlock(DatetimeLikeBlockMixin, Block): + +class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): __slots__ = () - is_datetime = True + is_timedelta = True _can_hold_na = True + is_numeric = False def __init__(self, values, placement, ndim=None): - values = self._maybe_coerce_values(values) - super(DatetimeBlock, self).__init__(values, - placement=placement, ndim=ndim) - - def _maybe_coerce_values(self, values): - """Input validation for values passed to __init__. Ensure that - we have datetime64ns, coercing if necessary. - - Parameters - ---------- - values : array-like - Must be convertible to datetime64 - - Returns - ------- - values : ndarray[datetime64ns] - - Overridden by DatetimeTZBlock. 
- """ - if values.dtype != _NS_DTYPE: - values = conversion.ensure_datetime64ns(values) - - if isinstance(values, DatetimeArray): + if values.dtype != _TD_DTYPE: + values = conversion.ensure_timedelta64ns(values) + if isinstance(values, TimedeltaArray): values = values._data - assert isinstance(values, np.ndarray), type(values) - return values - - def _astype(self, dtype, **kwargs): - """ - these automatically copy, so copy=True has no effect - raise on an except if raise == True - """ - dtype = pandas_dtype(dtype) + super(TimeDeltaBlock, self).__init__(values, + placement=placement, ndim=ndim) - # if we are passed a datetime64[ns, tz] - if is_datetime64tz_dtype(dtype): - values = self.values - if getattr(values, 'tz', None) is None: - values = DatetimeIndex(values).tz_localize('UTC') - values = values.tz_convert(dtype.tz) - return self.make_block(values) + @property + def _holder(self): + return TimedeltaArray - # delegate - return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs) + @property + def _box_func(self): + return lambda x: Timedelta(x, unit='ns') def _can_hold_element(self, element): tipo = maybe_infer_dtype_type(element) if tipo is not None: - return tipo == _NS_DTYPE or tipo == np.int64 - return (is_integer(element) or isinstance(element, datetime) or - isna(element)) + return issubclass(tipo.type, (np.timedelta64, np.int64)) + return is_integer(element) or isinstance( + element, (timedelta, np.timedelta64, np.int64)) + + def fillna(self, value, **kwargs): + + # allow filling with integers to be + # interpreted as seconds + if is_integer(value) and not isinstance(value, np.timedelta64): + value = Timedelta(value, unit='s') + return super(TimeDeltaBlock, self).fillna(value, **kwargs) def _try_coerce_args(self, values, other): """ - Coerce values and other to dtype 'i8'. NaN and NaT convert to - the smallest i8, and will correctly round-trip to NaT if converted - back in _try_coerce_result. 
values is always ndarray-like, other - may not be + Coerce values and other to int64, with null values converted to + iNaT. values is always ndarray-like, other may not be Parameters ---------- @@ -2833,20 +2653,19 @@ def _try_coerce_args(self, values, other): ------- base-type values, base-type other """ - values = values.view('i8') if isinstance(other, bool): raise TypeError elif is_null_datelike_scalar(other): other = tslibs.iNaT - elif isinstance(other, (datetime, np.datetime64, date)): - other = self._box_func(other) - if getattr(other, 'tz') is not None: - raise TypeError("cannot coerce a Timestamp with a tz on a " - "naive Block") - other = other.asm8.view('i8') - elif hasattr(other, 'dtype') and is_datetime64_dtype(other): + elif isinstance(other, Timedelta): + other = other.value + elif isinstance(other, timedelta): + other = Timedelta(other).value + elif isinstance(other, np.timedelta64): + other = Timedelta(other).value + elif hasattr(other, 'dtype') and is_timedelta64_dtype(other): other = other.astype('i8', copy=False).view('i8') else: # coercion issues @@ -2856,43 +2675,141 @@ def _try_coerce_args(self, values, other): return values, other def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ + """ reverse of try_coerce_args / try_operate """ if isinstance(result, np.ndarray): + mask = isna(result) if result.dtype.kind in ['i', 'f', 'O']: - try: - result = result.astype('M8[ns]') - except ValueError: - pass - elif isinstance(result, (np.integer, np.float, np.datetime64)): + result = result.astype('m8[ns]') + result[mask] = tslibs.iNaT + elif isinstance(result, (np.integer, np.float)): result = self._box_func(result) return result + def should_store(self, value): + return (issubclass(value.dtype.type, np.timedelta64) and + not is_extension_array_dtype(value)) + + def to_native_types(self, slicer=None, na_rep=None, quoting=None, + **kwargs): + """ convert to our native types format, slicing if desired """ + + values = self.values + 
if slicer is not None: + values = values[:, slicer] + mask = isna(values) + + rvalues = np.empty(values.shape, dtype=object) + if na_rep is None: + na_rep = 'NaT' + rvalues[mask] = na_rep + imask = (~mask).ravel() + + # FIXME: + # should use the formats.format.Timedelta64Formatter here + # to figure what format to pass to the Timedelta + # e.g. to not show the decimals say + rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all') + for val in values.ravel()[imask]], + dtype=object) + return rvalues + + def external_values(self, dtype=None): + return np.asarray(self.values.astype("timedelta64[ns]", copy=False)) + + +class BoolBlock(NumericBlock): + __slots__ = () + is_bool = True + _can_hold_na = False + + def _can_hold_element(self, element): + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + return issubclass(tipo.type, np.bool_) + return isinstance(element, (bool, np.bool_)) + + def should_store(self, value): + return (issubclass(value.dtype.type, np.bool_) and not + is_extension_array_dtype(value)) + + def replace(self, to_replace, value, inplace=False, filter=None, + regex=False, convert=True): + inplace = validate_bool_kwarg(inplace, 'inplace') + to_replace_values = np.atleast_1d(to_replace) + if not np.can_cast(to_replace_values, bool): + return self + return super(BoolBlock, self).replace(to_replace, value, + inplace=inplace, filter=filter, + regex=regex, convert=convert) + + +class ObjectBlock(Block): + __slots__ = () + is_object = True + _can_hold_na = True + + def __init__(self, values, placement=None, ndim=2): + if issubclass(values.dtype.type, compat.string_types): + values = np.array(values, dtype=object) + + super(ObjectBlock, self).__init__(values, ndim=ndim, + placement=placement) + @property - def _box_func(self): - return tslibs.Timestamp + def is_bool(self): + """ we can be a bool if we have only bool values but are of type + object + """ + return lib.is_bool_array(self.values.ravel()) + + # TODO: Refactor when 
convert_objects is removed since there will be 1 path + def convert(self, *args, **kwargs): + """ attempt to coerce any object types to better types return a copy of + the block (if copy = True) by definition we ARE an ObjectBlock!!!!! + + can return multiple blocks! + """ + + if args: + raise NotImplementedError + by_item = True if 'by_item' not in kwargs else kwargs['by_item'] - def to_native_types(self, slicer=None, na_rep=None, date_format=None, - quoting=None, **kwargs): - """ convert to our native types format, slicing if desired """ + new_inputs = ['coerce', 'datetime', 'numeric', 'timedelta'] + new_style = False + for kw in new_inputs: + new_style |= kw in kwargs - values = self.values - i8values = self.values.view('i8') + if new_style: + fn = soft_convert_objects + fn_inputs = new_inputs + else: + fn = maybe_convert_objects + fn_inputs = ['convert_dates', 'convert_numeric', + 'convert_timedeltas'] + fn_inputs += ['copy'] - if slicer is not None: - i8values = i8values[..., slicer] + fn_kwargs = {key: kwargs[key] for key in fn_inputs if key in kwargs} - from pandas.io.formats.format import _get_format_datetime64_from_values - format = _get_format_datetime64_from_values(values, date_format) + # operate column-by-column + def f(m, v, i): + shape = v.shape + values = fn(v.ravel(), **fn_kwargs) + try: + values = values.reshape(shape) + values = _block_shape(values, ndim=self.ndim) + except (AttributeError, NotImplementedError): + pass - result = tslib.format_array_from_datetime( - i8values.ravel(), tz=getattr(self.values, 'tz', None), - format=format, na_rep=na_rep).reshape(i8values.shape) - return np.atleast_2d(result) + return values - def should_store(self, value): - return (issubclass(value.dtype.type, np.datetime64) and - not is_datetime64tz_dtype(value) and - not is_extension_array_dtype(value)) + if by_item and not self._is_single_block: + blocks = self.split_and_operate(None, f, False) + else: + values = f(None, self.values.ravel(), None) + blocks = 
[make_block(values, ndim=self.ndim, + placement=self.mgr_locs)] + + return blocks def set(self, locs, values): """ @@ -2902,255 +2819,338 @@ def set(self, locs, values): ------- None """ - values = conversion.ensure_datetime64ns(values, copy=False) + try: + self.values[locs] = values + except (ValueError): - self.values[locs] = values + # broadcasting error + # see GH6171 + new_shape = list(values.shape) + new_shape[0] = len(self.items) + self.values = np.empty(tuple(new_shape), dtype=self.dtype) + self.values.fill(np.nan) + self.values[locs] = values - def external_values(self): - return np.asarray(self.values.astype('datetime64[ns]', copy=False)) + def _maybe_downcast(self, blocks, downcast=None): + if downcast is not None: + return blocks -class DatetimeTZBlock(ExtensionBlock, DatetimeBlock): - """ implement a datetime64 block with a tz attribute """ - __slots__ = () - is_datetimetz = True - is_extension = True + # split and convert the blocks + return _extend_blocks([b.convert(datetime=True, numeric=False) + for b in blocks]) - def __init__(self, values, placement, ndim=2, dtype=None): - # XXX: This will end up calling _maybe_coerce_values twice - # when dtype is not None. It's relatively cheap (just an isinstance) - # but it'd nice to avoid. - # - # If we can remove dtype from __init__, and push that conversion - # push onto the callers, then we can remove this entire __init__ - # and just use DatetimeBlock's. - if dtype is not None: - values = self._maybe_coerce_values(values, dtype=dtype) - super(DatetimeTZBlock, self).__init__(values, placement=placement, - ndim=ndim) + def _can_hold_element(self, element): + return True - @property - def _holder(self): - return DatetimeArray + def _try_coerce_args(self, values, other): + """ provide coercion to our input arguments """ - def _maybe_coerce_values(self, values, dtype=None): - """Input validation for values passed to __init__. Ensure that - we have datetime64TZ, coercing if necessary. 
+ if isinstance(other, ABCDatetimeIndex): + # May get a DatetimeIndex here. Unbox it. + other = other.array - Parametetrs - ----------- - values : array-like - Must be convertible to datetime64 - dtype : string or DatetimeTZDtype, optional - Does a shallow copy to this tz + if isinstance(other, DatetimeArray): + # hit in pandas/tests/indexing/test_coercion.py + # ::TestWhereCoercion::test_where_series_datetime64[datetime64tz] + # when falling back to ObjectBlock.where + other = other.astype(object) - Returns - ------- - values : ndarray[datetime64ns] - """ - if not isinstance(values, self._holder): - values = self._holder(values) + return values, other - if dtype is not None: - if isinstance(dtype, compat.string_types): - dtype = DatetimeTZDtype.construct_from_string(dtype) - values = type(values)(values, dtype=dtype) + def should_store(self, value): + return not (issubclass(value.dtype.type, + (np.integer, np.floating, np.complexfloating, + np.datetime64, np.bool_)) or + # TODO(ExtensionArray): remove is_extension_type + # when all extension arrays have been ported. 
+ is_extension_type(value) or + is_extension_array_dtype(value)) - if values.tz is None: - raise ValueError("cannot create a DatetimeTZBlock without a tz") + def replace(self, to_replace, value, inplace=False, filter=None, + regex=False, convert=True): + to_rep_is_list = is_list_like(to_replace) + value_is_list = is_list_like(value) + both_lists = to_rep_is_list and value_is_list + either_list = to_rep_is_list or value_is_list - return values + result_blocks = [] + blocks = [self] - @property - def is_view(self): - """ return a boolean if I am possibly a view """ - # check the ndarray values of the DatetimeIndex values - return self.values._data.base is not None + if not either_list and is_re(to_replace): + return self._replace_single(to_replace, value, inplace=inplace, + filter=filter, regex=True, + convert=convert) + elif not (either_list or regex): + return super(ObjectBlock, self).replace(to_replace, value, + inplace=inplace, + filter=filter, regex=regex, + convert=convert) + elif both_lists: + for to_rep, v in zip(to_replace, value): + result_blocks = [] + for b in blocks: + result = b._replace_single(to_rep, v, inplace=inplace, + filter=filter, regex=regex, + convert=convert) + result_blocks = _extend_blocks(result, result_blocks) + blocks = result_blocks + return result_blocks - def copy(self, deep=True): - """ copy constructor """ - values = self.values - if deep: - values = values.copy(deep=True) - return self.make_block_same_class(values) + elif to_rep_is_list and regex: + for to_rep in to_replace: + result_blocks = [] + for b in blocks: + result = b._replace_single(to_rep, value, inplace=inplace, + filter=filter, regex=regex, + convert=convert) + result_blocks = _extend_blocks(result, result_blocks) + blocks = result_blocks + return result_blocks - def get_values(self, dtype=None): + return self._replace_single(to_replace, value, inplace=inplace, + filter=filter, convert=convert, + regex=regex) + + def _replace_single(self, to_replace, value, 
inplace=False, filter=None, + regex=False, convert=True, mask=None): """ - Returns an ndarray of values. + Replace elements by the given value. Parameters ---------- - dtype : np.dtype - Only `object`-like dtypes are respected here (not sure - why). + to_replace : object or pattern + Scalar to replace or regular expression to match. + value : object + Replacement object. + inplace : bool, default False + Perform inplace modification. + filter : list, optional + regex : bool, default False + If true, perform regular expression substitution. + convert : bool, default True + If true, try to coerce any object types to better types. + mask : array-like of bool, optional + True indicate corresponding element is ignored. Returns ------- - values : ndarray - When ``dtype=object``, then and object-dtype ndarray of - boxed values is returned. Otherwise, an M8[ns] ndarray - is returned. + a new block, the result after replacing + """ + inplace = validate_bool_kwarg(inplace, 'inplace') + + # to_replace is regex compilable + to_rep_re = regex and is_re_compilable(to_replace) + + # regex is regex compilable + regex_re = is_re_compilable(regex) + + # only one will survive + if to_rep_re and regex_re: + raise AssertionError('only one of to_replace and regex can be ' + 'regex compilable') + + # if regex was passed as something that can be a regex (rather than a + # boolean) + if regex_re: + to_replace = regex + + regex = regex_re or to_rep_re + + # try to get the pattern attribute (compiled re) or it's a string + try: + pattern = to_replace.pattern + except AttributeError: + pattern = to_replace + + # if the pattern is not empty and to_replace is either a string or a + # regex + if regex and pattern: + rx = re.compile(to_replace) + else: + # if the thing to replace is not a string or compiled regex call + # the superclass method -> to_replace is some kind of object + return super(ObjectBlock, self).replace(to_replace, value, + inplace=inplace, + filter=filter, regex=regex) + + 
new_values = self.values if inplace else self.values.copy() + + # deal with replacing values with objects (strings) that match but + # whose replacement is not a string (numeric, nan, object) + if isna(value) or not isinstance(value, compat.string_types): + + def re_replacer(s): + try: + return value if rx.search(s) is not None else s + except TypeError: + return s + else: + # value is guaranteed to be a string here, s can be either a string + # or null if it's null it gets returned + def re_replacer(s): + try: + return rx.sub(value, s) + except TypeError: + return s - DatetimeArray is always 1-d. ``get_values`` will reshape - the return value to be the same dimensionality as the - block. - """ - values = self.values - if is_object_dtype(dtype): - values = values._box_values(values._data) + f = np.vectorize(re_replacer, otypes=[self.dtype]) - values = np.asarray(values) + if filter is None: + filt = slice(None) + else: + filt = self.mgr_locs.isin(filter).nonzero()[0] - if self.ndim == 2: - # Ensure that our shape is correct for DataFrame. - # ExtensionArrays are always 1-D, even in a DataFrame when - # the analogous NumPy-backed column would be a 2-D ndarray. 
- values = values.reshape(1, -1) - return values + if mask is None: + new_values[filt] = f(new_values[filt]) + else: + new_values[filt][mask] = f(new_values[filt][mask]) - def _slice(self, slicer): - """ return a slice of my values """ - if isinstance(slicer, tuple): - col, loc = slicer - if not com.is_null_slice(col) and col != 0: - raise IndexError("{0} only contains one item".format(self)) - return self.values[loc] - return self.values[slicer] + # convert + block = self.make_block(new_values) + if convert: + block = block.convert(by_item=True, numeric=False) + return block - def _try_coerce_args(self, values, other): + def _replace_coerce(self, to_replace, value, inplace=True, regex=False, + convert=False, mask=None): """ - localize and return i8 for the values + Replace value corresponding to the given boolean array with another + value. Parameters ---------- - values : ndarray-like - other : ndarray-like or scalar + to_replace : object or pattern + Scalar to replace or regular expression to match. + value : object + Replacement object. + inplace : bool, default False + Perform inplace modification. + regex : bool, default False + If true, perform regular expression substitution. + convert : bool, default True + If true, try to coerce any object types to better types. + mask : array-like of bool, optional + True indicate corresponding element is ignored. Returns ------- - base-type values, base-type other + A new block if there is anything to replace or the original block. 
""" - # asi8 is a view, needs copy - values = _block_shape(values.view("i8"), ndim=self.ndim) + if mask.any(): + block = super(ObjectBlock, self)._replace_coerce( + to_replace=to_replace, value=value, inplace=inplace, + regex=regex, convert=convert, mask=mask) + if convert: + block = [b.convert(by_item=True, numeric=False, copy=True) + for b in block] + return block + return self - if isinstance(other, ABCSeries): - other = self._holder(other) - if isinstance(other, bool): - raise TypeError - elif is_datetime64_dtype(other): - # add the tz back - other = self._holder(other, dtype=self.dtype) +class CategoricalBlock(ExtensionBlock): + __slots__ = () + is_categorical = True + _verify_integrity = True + _can_hold_na = True + _concatenator = staticmethod(_concat._concat_categorical) - elif (is_null_datelike_scalar(other) or - (lib.is_scalar(other) and isna(other))): - other = tslibs.iNaT - elif isinstance(other, self._holder): - if other.tz != self.values.tz: - raise ValueError("incompatible or non tz-aware value") - other = _block_shape(other.asi8, ndim=self.ndim) - elif isinstance(other, (np.datetime64, datetime, date)): - other = tslibs.Timestamp(other) - tz = getattr(other, 'tz', None) + def __init__(self, values, placement, ndim=None): + from pandas.core.arrays.categorical import _maybe_to_categorical - # test we can have an equal time zone - if tz is None or str(tz) != str(self.values.tz): - raise ValueError("incompatible or non tz-aware value") - other = other.value - else: - raise TypeError + # coerce to categorical if we can + super(CategoricalBlock, self).__init__(_maybe_to_categorical(values), + placement=placement, + ndim=ndim) - return values, other + @property + def _holder(self): + return Categorical + + @property + def array_dtype(self): + """ the dtype to return if I want to construct this block as an + array + """ + return np.object_ def _try_coerce_result(self, result): """ reverse of try_coerce_args """ - if isinstance(result, np.ndarray): - if 
result.dtype.kind in ['i', 'f', 'O']: - result = result.astype('M8[ns]') - elif isinstance(result, (np.integer, np.float, np.datetime64)): - result = tslibs.Timestamp(result, tz=self.values.tz) - if isinstance(result, np.ndarray): - # allow passing of > 1dim if its trivial - if result.ndim > 1: - result = result.reshape(np.prod(result.shape)) - # GH#24096 new values invalidates a frequency - result = self._holder._simple_new(result, freq=None, - tz=self.values.tz) + # GH12564: CategoricalBlock is 1-dim only + # while returned results could be any dim + if ((not is_categorical_dtype(result)) and + isinstance(result, np.ndarray)): + result = _block_shape(result, ndim=self.ndim) return result - @property - def _box_func(self): - return lambda x: tslibs.Timestamp(x, tz=self.dtype.tz) + def to_dense(self): + # Categorical.get_values returns a DatetimeIndex for datetime + # categories, so we can't simply use `np.asarray(self.values)` like + # other types. + return self.values.get_values() - def diff(self, n, axis=0): - """1st discrete difference + def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs): + """ convert to our native types format, slicing if desired """ - Parameters - ---------- - n : int, number of periods to diff - axis : int, axis to diff upon. default 0 + values = self.values + if slicer is not None: + # Categorical is always one dimension + values = values[slicer] + mask = isna(values) + values = np.array(values, dtype='object') + values[mask] = na_rep - Return - ------ - A list with a new TimeDeltaBlock. + # we are expected to return a 2-d ndarray + return values.reshape(1, len(values)) - Note - ---- - The arguments here are mimicking shift so they are called correctly - by apply. 
+ def concat_same_type(self, to_concat, placement=None): """ - if axis == 0: - # Cannot currently calculate diff across multiple blocks since this - # function is invoked via apply - raise NotImplementedError - new_values = (self.values - self.shift(n, axis=axis)[0].values).asi8 + Concatenate list of single blocks of the same type. - # Reshape the new_values like how algos.diff does for timedelta data - new_values = new_values.reshape(1, len(new_values)) - new_values = new_values.astype('timedelta64[ns]') - return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)] + Note that this CategoricalBlock._concat_same_type *may* not + return a CategoricalBlock. When the categories in `to_concat` + differ, this will return an object ndarray. - def concat_same_type(self, to_concat, placement=None): - # need to handle concat([tz1, tz2]) here, since DatetimeArray - # only handles cases where all the tzs are the same. - # Instead of placing the condition here, it could also go into the - # is_uniform_join_units check, but I'm not sure what is better. - if len({x.dtype for x in to_concat}) > 1: - values = _concat._concat_datetime([x.values for x in to_concat]) - placement = placement or slice(0, len(values), 1) + If / when we decide we don't like that behavior: - if self.ndim > 1: - values = np.atleast_2d(values) - return ObjectBlock(values, ndim=self.ndim, placement=placement) - return super(DatetimeTZBlock, self).concat_same_type(to_concat, - placement) + 1. Change Categorical._concat_same_type to use union_categoricals + 2. Delete this method. 
+ """ + values = self._concatenator([blk.values for blk in to_concat], + axis=self.ndim - 1) + # not using self.make_block_same_class as values can be object dtype + return make_block( + values, placement=placement or slice(0, len(values), 1), + ndim=self.ndim) - def fillna(self, value, limit=None, inplace=False, downcast=None): - # We support filling a DatetimeTZ with a `value` whose timezone - # is different by coercing to object. + def where(self, other, cond, align=True, errors='raise', + try_cast=False, axis=0, transpose=False): + # TODO(CategoricalBlock.where): + # This can all be deleted in favor of ExtensionBlock.where once + # we enforce the deprecation. + object_msg = ( + "Implicitly converting categorical to object-dtype ndarray. " + "One or more of the values in 'other' are not present in this " + "categorical's categories. A future version of pandas will raise " + "a ValueError when 'other' contains different categories.\n\n" + "To preserve the current behavior, add the new categories to " + "the categorical before calling 'where', or convert the " + "categorical to a different dtype." + ) try: - return super(DatetimeTZBlock, self).fillna( - value, limit, inplace, downcast - ) - except (ValueError, TypeError): - # different timezones, or a non-tz - return self.astype(object).fillna( - value, limit=limit, inplace=inplace, downcast=downcast + # Attempt to do preserve categorical dtype. + result = super(CategoricalBlock, self).where( + other, cond, align, errors, try_cast, axis, transpose ) - - def setitem(self, indexer, value): - # https://github.com/pandas-dev/pandas/issues/24020 - # Need a dedicated setitem until #24020 (type promotion in setitem - # for extension arrays) is designed and implemented. 
- try: - return super(DatetimeTZBlock, self).setitem(indexer, value) - except (ValueError, TypeError): - newb = make_block(self.values.astype(object), - placement=self.mgr_locs, - klass=ObjectBlock,) - return newb.setitem(indexer, value) + except (TypeError, ValueError): + warnings.warn(object_msg, FutureWarning, stacklevel=6) + result = self.astype(object).where(other, cond, align=align, + errors=errors, + try_cast=try_cast, + axis=axis, transpose=transpose) + return result # -----------------------------------------------------------------
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24582
2019-01-03T02:53:43Z
2019-01-03T03:45:14Z
2019-01-03T03:45:14Z
2019-01-03T03:46:39Z
Support hard-masked numpy arrays
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 7628c53cefa06..c9210a5597d48 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1537,6 +1537,7 @@ Missing - Bug in :func:`Series.hasnans` that could be incorrectly cached and return incorrect answers if null elements are introduced after an initial call (:issue:`19700`) - :func:`Series.isin` now treats all NaN-floats as equal also for ``np.object``-dtype. This behavior is consistent with the behavior for float64 (:issue:`22119`) - :func:`unique` no longer mangles NaN-floats and the ``NaT``-object for ``np.object``-dtype, i.e. ``NaT`` is no longer coerced to a NaN-value and is treated as a different entity. (:issue:`22295`) +- :func:`DataFrame` and :func:`Series` now properly handle numpy masked arrays with hardened masks. Previously, constructing a DataFrame or Series from a masked array with a hard mask would create a pandas object containing the underlying value, rather than the expected NaN. 
(:issue:`24574`) MultiIndex diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d6aa3117570af..76d3d704497b4 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -400,6 +400,7 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, mask = ma.getmaskarray(data) if mask.any(): data, fill_value = maybe_upcast(data, copy=True) + data.soften_mask() # set hardmask False if it was True data[mask] = fill_value else: data = data.copy() diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index b3c893c7d84be..446ad72ac4a53 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -547,6 +547,7 @@ def sanitize_array(data, index, dtype=None, copy=False, mask = ma.getmaskarray(data) if mask.any(): data, fill_value = maybe_upcast(data, copy=True) + data.soften_mask() # set hardmask False if it was True data[mask] = fill_value else: data = data.copy() diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 8a5ec1a16d1df..c8b3f23db1492 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -757,6 +757,28 @@ def test_constructor_maskedarray_nonfloat(self): assert frame['A'][1] is True assert frame['C'][2] is False + def test_constructor_maskedarray_hardened(self): + # Check numpy masked arrays with hard masks -- from GH24574 + mat_hard = ma.masked_all((2, 2), dtype=float).harden_mask() + result = pd.DataFrame(mat_hard, columns=['A', 'B'], index=[1, 2]) + expected = pd.DataFrame({ + 'A': [np.nan, np.nan], + 'B': [np.nan, np.nan]}, + columns=['A', 'B'], + index=[1, 2], + dtype=float) + tm.assert_frame_equal(result, expected) + # Check case where mask is hard but no data are masked + mat_hard = ma.ones((2, 2), dtype=float).harden_mask() + result = pd.DataFrame(mat_hard, columns=['A', 'B'], index=[1, 2]) + expected = pd.DataFrame({ + 'A': [1.0, 1.0], + 'B': [1.0, 1.0]}, + 
columns=['A', 'B'], + index=[1, 2], + dtype=float) + tm.assert_frame_equal(result, expected) + def test_constructor_mrecarray(self): # Ensure mrecarray produces frame identical to dict of masked arrays # from GH3479 diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index f5a445e2cca9a..667065d09758b 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -451,6 +451,13 @@ def test_constructor_maskedarray(self): datetime(2001, 1, 3)], index=index, dtype='M8[ns]') assert_series_equal(result, expected) + def test_constructor_maskedarray_hardened(self): + # Check numpy masked arrays with hard masks -- from GH24574 + data = ma.masked_all((3, ), dtype=float).harden_mask() + result = pd.Series(data) + expected = pd.Series([nan, nan, nan]) + tm.assert_series_equal(result, expected) + def test_series_ctor_plus_datetimeindex(self): rng = date_range('20090415', '20090519', freq='B') data = {k: 1 for k in rng}
- [x] closes #24574 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry For the whatsnew entry, is this reasonable? ```rst - :func:`DataFrame` and :func:`Series` now properly handle numpy masked arrays with hardened masks. Previously, constructing a DataFrame or Series from a masked array with a hard mask would create a pandas object containing the underlying value, rather than the expected NaN. (:issue:`24574`) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/24581
2019-01-03T02:42:49Z
2019-01-04T00:24:23Z
2019-01-04T00:24:22Z
2019-01-04T00:39:26Z
Fix import format at pandas/tests/io/plotting directory
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index f41a3a10604af..4ca916a0aa4e4 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -1,25 +1,28 @@ #!/usr/bin/env python # coding: utf-8 -import pytest import os import warnings -from pandas import DataFrame, Series -from pandas.compat import zip, iteritems +import numpy as np +from numpy import random +import pytest + +from pandas.compat import iteritems, zip from pandas.util._decorators import cache_readonly -from pandas.core.dtypes.api import is_list_like -import pandas.util.testing as tm -from pandas.util.testing import (ensure_clean, - assert_is_valid_plot_return_object) import pandas.util._test_decorators as td -import numpy as np -from numpy import random +from pandas.core.dtypes.api import is_list_like + +from pandas import DataFrame, Series +import pandas.util.testing as tm +from pandas.util.testing import ( + assert_is_valid_plot_return_object, ensure_clean) import pandas.plotting as plotting from pandas.plotting._tools import _flatten + """ This is a common base class used for various plotting tests """ diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py index e89584ca35d94..7d721c7de3398 100644 --- a/pandas/tests/plotting/test_boxplot_method.py +++ b/pandas/tests/plotting/test_boxplot_method.py @@ -1,21 +1,20 @@ # coding: utf-8 -import pytest import itertools import string -from pandas import Series, DataFrame, MultiIndex -from pandas.compat import range, lzip -import pandas.util.testing as tm -import pandas.util._test_decorators as td - import numpy as np from numpy import random +import pytest -import pandas.plotting as plotting +from pandas.compat import lzip, range +import pandas.util._test_decorators as td -from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works) +from pandas import DataFrame, MultiIndex, Series +from pandas.tests.plotting.common import 
TestPlotBase, _check_plot_works +import pandas.util.testing as tm +import pandas.plotting as plotting """ Test cases for .boxplot method """ diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py index eed3679c5bc8c..01aa8e8ccc1ee 100644 --- a/pandas/tests/plotting/test_converter.py +++ b/pandas/tests/plotting/test_converter.py @@ -1,19 +1,22 @@ +from datetime import date, datetime import subprocess import sys -import pytest -from datetime import datetime, date import numpy as np -from pandas import Timestamp, Period, Index, date_range, Series +import pytest + from pandas.compat import u +from pandas.compat.numpy import np_datetime64_compat + +from pandas import Index, Period, Series, Timestamp, date_range import pandas.core.config as cf import pandas.util.testing as tm -from pandas.tseries.offsets import Second, Milli, Micro, Day -from pandas.compat.numpy import np_datetime64_compat + +from pandas.tseries.offsets import Day, Micro, Milli, Second converter = pytest.importorskip('pandas.plotting._converter') -from pandas.plotting import (register_matplotlib_converters, - deregister_matplotlib_converters) +from pandas.plotting import (deregister_matplotlib_converters, # isort:skip + register_matplotlib_converters) def test_timtetonum_accepts_unicode(): diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 7a28f05514dd5..c78ab41d2fae4 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -1,26 +1,25 @@ """ Test cases for time series specific (freq conversion, etc) """ -import sys -from datetime import datetime, timedelta, date, time +from datetime import date, datetime, time, timedelta import pickle +import sys +import numpy as np import pytest -from pandas.compat import lrange, zip -import numpy as np -from pandas import Index, Series, DataFrame, NaT, isna -from pandas.compat import PY3 -from pandas.core.indexes.datetimes 
import date_range, bdate_range +from pandas.compat import PY3, lrange, zip +import pandas.util._test_decorators as td + +from pandas import DataFrame, Index, NaT, Series, isna +from pandas.core.indexes.datetimes import bdate_range, date_range +from pandas.core.indexes.period import Period, PeriodIndex, period_range from pandas.core.indexes.timedeltas import timedelta_range -from pandas.tseries.offsets import DateOffset -from pandas.core.indexes.period import period_range, Period, PeriodIndex from pandas.core.resample import DatetimeIndex - -from pandas.util.testing import assert_series_equal, ensure_clean +from pandas.tests.plotting.common import ( + TestPlotBase, _skip_if_no_scipy_gaussian_kde) import pandas.util.testing as tm -import pandas.util._test_decorators as td +from pandas.util.testing import assert_series_equal, ensure_clean -from pandas.tests.plotting.common import (TestPlotBase, - _skip_if_no_scipy_gaussian_kde) +from pandas.tseries.offsets import DateOffset @td.skip_if_no_mpl diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index cc52130a10b2e..436ccef48ae12 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -2,28 +2,29 @@ """ Test cases for DataFrame.plot """ -import pytest +from datetime import date, datetime import string import warnings -from datetime import datetime, date +import numpy as np +from numpy.random import rand, randn +import pytest -import pandas as pd -from pandas import (Series, DataFrame, MultiIndex, PeriodIndex, date_range, - bdate_range) -from pandas.core.dtypes.api import is_list_like -from pandas.compat import range, lrange, lmap, lzip, u, zip, PY3 -from pandas.io.formats.printing import pprint_thing -import pandas.util.testing as tm +from pandas.compat import PY3, lmap, lrange, lzip, range, u, zip import pandas.util._test_decorators as td -import numpy as np -from numpy.random import rand, randn +from pandas.core.dtypes.api import is_list_like 
+import pandas as pd +from pandas import ( + DataFrame, MultiIndex, PeriodIndex, Series, bdate_range, date_range) +from pandas.tests.plotting.common import ( + TestPlotBase, _check_plot_works, _ok_for_gaussian_kde, + _skip_if_no_scipy_gaussian_kde) +import pandas.util.testing as tm + +from pandas.io.formats.printing import pprint_thing import pandas.plotting as plotting -from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works, - _skip_if_no_scipy_gaussian_kde, - _ok_for_gaussian_kde) @td.skip_if_no_mpl diff --git a/pandas/tests/plotting/test_groupby.py b/pandas/tests/plotting/test_groupby.py index a7c99a06c34e9..5a5ee75928c97 100644 --- a/pandas/tests/plotting/test_groupby.py +++ b/pandas/tests/plotting/test_groupby.py @@ -3,13 +3,13 @@ """ Test cases for GroupBy.plot """ -from pandas import Series, DataFrame -import pandas.util.testing as tm -import pandas.util._test_decorators as td - import numpy as np +import pandas.util._test_decorators as td + +from pandas import DataFrame, Series from pandas.tests.plotting.common import TestPlotBase +import pandas.util.testing as tm @td.skip_if_no_mpl diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py index 1d9942603a269..7bdbdac54f7a6 100644 --- a/pandas/tests/plotting/test_hist_method.py +++ b/pandas/tests/plotting/test_hist_method.py @@ -2,18 +2,18 @@ """ Test cases for .hist method """ +import numpy as np +from numpy.random import randn import pytest -from pandas import Series, DataFrame -import pandas.util.testing as tm import pandas.util._test_decorators as td -import numpy as np -from numpy.random import randn +from pandas import DataFrame, Series +from pandas.tests.plotting.common import TestPlotBase, _check_plot_works +import pandas.util.testing as tm -from pandas.plotting._core import grouped_hist from pandas.plotting._compat import _mpl_ge_2_2_0 -from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works) +from pandas.plotting._core 
import grouped_hist @td.skip_if_no_mpl diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index de9e2a16cd15e..44b95f7d1b00b 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -2,19 +2,19 @@ """ Test cases for misc plot functions """ +import numpy as np +from numpy import random +from numpy.random import randn import pytest -from pandas import DataFrame from pandas.compat import lmap -import pandas.util.testing as tm import pandas.util._test_decorators as td -import numpy as np -from numpy import random -from numpy.random import randn +from pandas import DataFrame +from pandas.tests.plotting.common import TestPlotBase, _check_plot_works +import pandas.util.testing as tm import pandas.plotting as plotting -from pandas.tests.plotting.common import TestPlotBase, _check_plot_works @td.skip_if_mpl diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index b857979005f5e..39f8f2f44fda0 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -3,24 +3,24 @@ """ Test cases for Series.plot """ +from datetime import datetime from itertools import chain + +import numpy as np +from numpy.random import randn import pytest -from datetime import datetime +from pandas.compat import lrange, range +import pandas.util._test_decorators as td import pandas as pd -from pandas import Series, DataFrame, date_range -from pandas.compat import range, lrange +from pandas import DataFrame, Series, date_range +from pandas.tests.plotting.common import ( + TestPlotBase, _check_plot_works, _ok_for_gaussian_kde, + _skip_if_no_scipy_gaussian_kde) import pandas.util.testing as tm -import pandas.util._test_decorators as td - -import numpy as np -from numpy.random import randn import pandas.plotting as plotting -from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works, - _skip_if_no_scipy_gaussian_kde, - _ok_for_gaussian_kde) 
@td.skip_if_no_mpl diff --git a/setup.cfg b/setup.cfg index a1c82304c5a72..6c076eed580dd 100644 --- a/setup.cfg +++ b/setup.cfg @@ -122,16 +122,6 @@ skip= pandas/tests/api/test_api.py, pandas/tests/tools/test_numeric.py, pandas/tests/internals/test_internals.py, - pandas/tests/plotting/test_datetimelike.py, - pandas/tests/plotting/test_series.py, - pandas/tests/plotting/test_groupby.py, - pandas/tests/plotting/test_converter.py, - pandas/tests/plotting/test_misc.py, - pandas/tests/plotting/test_frame.py, - pandas/tests/plotting/test_hist_method.py, - pandas/tests/plotting/common.py, - pandas/tests/plotting/test_boxplot_method.py, - pandas/tests/plotting/test_deprecated.py, pandas/tests/extension/test_sparse.py, pandas/tests/extension/base/reduce.py, pandas/tests/computation/test_compat.py,
- [x] partial #23334 - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Ran `isort --recursive pandas/tests/io/plotting` and then checked imports using `isort --recursive --check-only pandas/tests/io/plotting`
https://api.github.com/repos/pandas-dev/pandas/pulls/24580
2019-01-03T02:16:38Z
2019-01-04T12:13:13Z
2019-01-04T12:13:13Z
2019-01-04T12:13:13Z
Fix import format at pandas/tests/io/arithmetic directory
diff --git a/pandas/tests/arithmetic/conftest.py b/pandas/tests/arithmetic/conftest.py index 44e6cc664de6d..671fe69750c57 100644 --- a/pandas/tests/arithmetic/conftest.py +++ b/pandas/tests/arithmetic/conftest.py @@ -1,16 +1,16 @@ # -*- coding: utf-8 -*- -import pytest - import numpy as np -import pandas as pd +import pytest from pandas.compat import long -import pandas.util.testing as tm +import pandas as pd +import pandas.util.testing as tm # ------------------------------------------------------------------ # Helper Functions + def id_func(x): if isinstance(x, tuple): assert len(x) == 2 diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index f5c4808a09123..7d01d39ae6bb5 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -2,29 +2,26 @@ # Arithmetic tests for DataFrame/Series/Index/Array classes that should # behave identically. # Specifically for datetime64 and datetime64tz dtypes -import operator from datetime import datetime, timedelta -import warnings from itertools import product, starmap +import operator +import warnings import numpy as np import pytest import pytz -import pandas as pd -import pandas.util.testing as tm - -from pandas.compat.numpy import np_datetime64_compat -from pandas.errors import PerformanceWarning, NullFrequencyError - from pandas._libs.tslibs.conversion import localize_pydatetime from pandas._libs.tslibs.offsets import shift_months +from pandas.compat.numpy import np_datetime64_compat +from pandas.errors import NullFrequencyError, PerformanceWarning -from pandas.core.indexes.datetimes import _to_M8 - +import pandas as pd from pandas import ( - Timestamp, Timedelta, Period, Series, date_range, NaT, - DatetimeIndex, TimedeltaIndex) + DatetimeIndex, NaT, Period, Series, Timedelta, TimedeltaIndex, Timestamp, + date_range) +from pandas.core.indexes.datetimes import _to_M8 +import pandas.util.testing as tm def assert_all(obj): diff 
--git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index c603485f6f076..7afb90978131d 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -6,20 +6,20 @@ from itertools import combinations import operator -import pytest import numpy as np - -import pandas as pd -import pandas.util.testing as tm +import pytest from pandas.compat import PY3, Iterable -from pandas.core import ops -from pandas import Timedelta, Series, Index, TimedeltaIndex +import pandas as pd +from pandas import Index, Series, Timedelta, TimedeltaIndex +from pandas.core import ops +import pandas.util.testing as tm # ------------------------------------------------------------------ # Comparisons + class TestNumericComparisons(object): def test_operator_series_comparison_zerorank(self): # GH#13006 diff --git a/pandas/tests/arithmetic/test_object.py b/pandas/tests/arithmetic/test_object.py index e9a3f4accc486..9917c45ef6d12 100644 --- a/pandas/tests/arithmetic/test_object.py +++ b/pandas/tests/arithmetic/test_object.py @@ -4,19 +4,18 @@ # Specifically for object dtype import operator -import pytest import numpy as np +import pytest import pandas as pd -import pandas.util.testing as tm -from pandas.core import ops - from pandas import Series, Timestamp - +from pandas.core import ops +import pandas.util.testing as tm # ------------------------------------------------------------------ # Comparisons + class TestObjectComparisons(object): def test_comparison_object_numeric_nas(self): diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index 469353042a878..cdacd4b42d683 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -7,20 +7,20 @@ import numpy as np import pytest -import pandas as pd -import pandas.util.testing as tm - from pandas._libs.tslibs.period import IncompatibleFrequency from pandas.errors import PerformanceWarning 
+import pandas as pd +from pandas import Period, PeriodIndex, Series, period_range from pandas.core import ops -from pandas import Period, PeriodIndex, period_range, Series -from pandas.tseries.frequencies import to_offset +import pandas.util.testing as tm +from pandas.tseries.frequencies import to_offset # ------------------------------------------------------------------ # Comparisons + class TestPeriodIndexComparisons(object): @pytest.mark.parametrize("other", ["2017", 2017]) diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index 12ed174d6cc53..4474b06b19536 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -3,17 +3,16 @@ # behave identically. from datetime import datetime, timedelta -import pytest import numpy as np - -import pandas as pd -import pandas.util.testing as tm +import pytest from pandas.errors import NullFrequencyError, PerformanceWarning + +import pandas as pd from pandas import ( - timedelta_range, - Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex, - DataFrame) + DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex, + Timestamp, timedelta_range) +import pandas.util.testing as tm def get_upcast_box(box, vector): diff --git a/setup.cfg b/setup.cfg index c21f09f131dbd..a1c82304c5a72 100644 --- a/setup.cfg +++ b/setup.cfg @@ -121,12 +121,6 @@ skip= pandas/tests/api/test_types.py, pandas/tests/api/test_api.py, pandas/tests/tools/test_numeric.py, - pandas/tests/arithmetic/test_numeric.py, - pandas/tests/arithmetic/test_object.py, - pandas/tests/arithmetic/test_period.py, - pandas/tests/arithmetic/test_datetime64.py, - pandas/tests/arithmetic/conftest.py, - pandas/tests/arithmetic/test_timedelta64.py, pandas/tests/internals/test_internals.py, pandas/tests/plotting/test_datetimelike.py, pandas/tests/plotting/test_series.py,
- [x] partial #23334 - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Ran `isort --recursive pandas/tests/io/arithmetic` and then checked imports using `isort --recursive --check-only pandas/tests/io/arithmetic`
https://api.github.com/repos/pandas-dev/pandas/pulls/24579
2019-01-03T02:12:42Z
2019-01-04T00:39:37Z
2019-01-04T00:39:37Z
2019-01-04T00:39:40Z
Fix import format at pandas/tests/io/dtypes directory
diff --git a/pandas/tests/dtypes/test_cast.py b/pandas/tests/dtypes/test_cast.py index fcdcf96098f16..871e71ea2e4b0 100644 --- a/pandas/tests/dtypes/test_cast.py +++ b/pandas/tests/dtypes/test_cast.py @@ -5,30 +5,24 @@ """ -import pytest -from datetime import datetime, timedelta, date -import numpy as np +from datetime import date, datetime, timedelta -import pandas as pd -from pandas import (Timedelta, Timestamp, DatetimeIndex, - DataFrame, NaT, Period, Series) +import numpy as np +import pytest from pandas.core.dtypes.cast import ( - maybe_downcast_to_dtype, - maybe_convert_objects, - cast_scalar_to_array, - infer_dtype_from_scalar, - infer_dtype_from_array, - find_common_type, - construct_1d_object_array_from_listlike, + cast_scalar_to_array, construct_1d_arraylike_from_scalar, construct_1d_ndarray_preserving_na, - construct_1d_arraylike_from_scalar) + construct_1d_object_array_from_listlike, find_common_type, + infer_dtype_from_array, infer_dtype_from_scalar, maybe_convert_objects, + maybe_downcast_to_dtype) +from pandas.core.dtypes.common import is_dtype_equal from pandas.core.dtypes.dtypes import ( - CategoricalDtype, - DatetimeTZDtype, - PeriodDtype) -from pandas.core.dtypes.common import ( - is_dtype_equal) + CategoricalDtype, DatetimeTZDtype, PeriodDtype) + +import pandas as pd +from pandas import ( + DataFrame, DatetimeIndex, NaT, Period, Series, Timedelta, Timestamp) from pandas.util import testing as tm diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index 2d6d3101f7371..5fcf19b0b12e7 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -1,15 +1,16 @@ # -*- coding: utf-8 -*- -import pytest import numpy as np -import pandas as pd +import pytest -from pandas.core.dtypes.dtypes import (DatetimeTZDtype, PeriodDtype, - CategoricalDtype, IntervalDtype) -from pandas.core.sparse.api import SparseDtype +import pandas.util._test_decorators as td import pandas.core.dtypes.common as com 
-import pandas.util._test_decorators as td +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype) + +import pandas as pd +from pandas.core.sparse.api import SparseDtype import pandas.util.testing as tm diff --git a/pandas/tests/dtypes/test_concat.py b/pandas/tests/dtypes/test_concat.py index 35623415571c0..d58f8ee3b74f1 100644 --- a/pandas/tests/dtypes/test_concat.py +++ b/pandas/tests/dtypes/test_concat.py @@ -1,9 +1,11 @@ # -*- coding: utf-8 -*- import pytest + import pandas.core.dtypes.concat as _concat + from pandas import ( - Index, DatetimeIndex, PeriodIndex, TimedeltaIndex, Series, Period) + DatetimeIndex, Index, Period, PeriodIndex, Series, TimedeltaIndex) @pytest.mark.parametrize('to_concat, expected', [ diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index aa29473ddf130..ab52a8a81385c 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -1,24 +1,20 @@ # -*- coding: utf-8 -*- import re -import pytest import numpy as np -import pandas as pd -from pandas import ( - Series, Categorical, CategoricalIndex, IntervalIndex, date_range) +import pytest -from pandas.core.dtypes.dtypes import ( - DatetimeTZDtype, PeriodDtype, - IntervalDtype, CategoricalDtype, registry) from pandas.core.dtypes.common import ( - is_categorical_dtype, is_categorical, - is_datetime64tz_dtype, is_datetimetz, - is_period_dtype, is_period, - is_dtype_equal, is_datetime64_ns_dtype, - is_datetime64_dtype, is_interval_dtype, - is_datetime64_any_dtype, is_string_dtype, - is_bool_dtype, -) + is_bool_dtype, is_categorical, is_categorical_dtype, + is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype, + is_datetime64tz_dtype, is_datetimetz, is_dtype_equal, is_interval_dtype, + is_period, is_period_dtype, is_string_dtype) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype, registry) + +import pandas as pd 
+from pandas import ( + Categorical, CategoricalIndex, IntervalIndex, Series, date_range) from pandas.core.sparse.api import SparseDtype import pandas.util.testing as tm diff --git a/pandas/tests/dtypes/test_generic.py b/pandas/tests/dtypes/test_generic.py index 53fa482bdeaef..96f92fccc5a71 100644 --- a/pandas/tests/dtypes/test_generic.py +++ b/pandas/tests/dtypes/test_generic.py @@ -1,9 +1,12 @@ # -*- coding: utf-8 -*- from warnings import catch_warnings, simplefilter + import numpy as np -import pandas as pd + from pandas.core.dtypes import generic as gt + +import pandas as pd from pandas.util import testing as tm diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index fff91991ee251..cc2aa64b98c8b 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -5,42 +5,34 @@ related to inference and not otherwise tested in types/test_common.py """ -from warnings import catch_warnings, simplefilter import collections -import re -from datetime import datetime, date, timedelta, time +from datetime import date, datetime, time, timedelta from decimal import Decimal -from numbers import Number from fractions import Fraction +from numbers import Number +import re +from warnings import catch_warnings, simplefilter + import numpy as np -import pytz import pytest -import pandas as pd -from pandas._libs import lib, iNaT, missing as libmissing -from pandas import (Series, Index, DataFrame, Timedelta, - DatetimeIndex, TimedeltaIndex, Timestamp, - Panel, Period, Categorical, isna, Interval, - DateOffset) -from pandas import compat -from pandas.compat import u, PY2, StringIO, lrange +import pytz + +from pandas._libs import iNaT, lib, missing as libmissing +from pandas.compat import PY2, StringIO, lrange, u +import pandas.util._test_decorators as td + from pandas.core.dtypes import inference from pandas.core.dtypes.common import ( - is_timedelta64_dtype, - is_timedelta64_ns_dtype, - is_datetime64_dtype, - 
is_datetime64_ns_dtype, - is_datetime64_any_dtype, - is_datetime64tz_dtype, - is_number, - is_integer, - is_float, - is_bool, - is_scalar, - is_scipy_sparse, - ensure_int32, - ensure_categorical) + ensure_categorical, ensure_int32, is_bool, is_datetime64_any_dtype, + is_datetime64_dtype, is_datetime64_ns_dtype, is_datetime64tz_dtype, + is_float, is_integer, is_number, is_scalar, is_scipy_sparse, + is_timedelta64_dtype, is_timedelta64_ns_dtype) + +import pandas as pd +from pandas import ( + Categorical, DataFrame, DateOffset, DatetimeIndex, Index, Interval, Panel, + Period, Series, Timedelta, TimedeltaIndex, Timestamp, compat, isna) from pandas.util import testing as tm -import pandas.util._test_decorators as td @pytest.fixture(params=[True, False], ids=str) diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index cb3f5933c885f..56c9395d0f802 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -1,25 +1,26 @@ # -*- coding: utf-8 -*- -import pytest -from warnings import catch_warnings, simplefilter -import numpy as np from datetime import datetime -from pandas.util import testing as tm +from warnings import catch_warnings, simplefilter -import pandas as pd -from pandas.core import config as cf -from pandas.compat import u +import numpy as np +import pytest from pandas._libs import missing as libmissing from pandas._libs.tslib import iNaT -from pandas import (NaT, Float64Index, Series, - DatetimeIndex, TimedeltaIndex, date_range) +from pandas.compat import u + from pandas.core.dtypes.common import is_scalar from pandas.core.dtypes.dtypes import ( - DatetimeTZDtype, PeriodDtype, IntervalDtype) + DatetimeTZDtype, IntervalDtype, PeriodDtype) from pandas.core.dtypes.missing import ( - array_equivalent, isna, notna, isnull, notnull, - na_value_for_dtype) + array_equivalent, isna, isnull, na_value_for_dtype, notna, notnull) + +import pandas as pd +from pandas import ( + DatetimeIndex, Float64Index, 
NaT, Series, TimedeltaIndex, date_range) +from pandas.core import config as cf +from pandas.util import testing as tm @pytest.mark.parametrize('notna_f', [notna, notnull]) diff --git a/setup.cfg b/setup.cfg index 032a41df90f83..c21f09f131dbd 100644 --- a/setup.cfg +++ b/setup.cfg @@ -121,13 +121,6 @@ skip= pandas/tests/api/test_types.py, pandas/tests/api/test_api.py, pandas/tests/tools/test_numeric.py, - pandas/tests/dtypes/test_concat.py, - pandas/tests/dtypes/test_generic.py, - pandas/tests/dtypes/test_common.py, - pandas/tests/dtypes/test_cast.py, - pandas/tests/dtypes/test_dtypes.py, - pandas/tests/dtypes/test_inference.py, - pandas/tests/dtypes/test_missing.py, pandas/tests/arithmetic/test_numeric.py, pandas/tests/arithmetic/test_object.py, pandas/tests/arithmetic/test_period.py,
- [x] partial #23334 - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Ran` isort --recursive pandas/tests/io/dtypes` and then checked imports using `isort --recursive --check-only pandas/tests/io/dtypes`
https://api.github.com/repos/pandas-dev/pandas/pulls/24578
2019-01-03T02:09:08Z
2019-01-03T02:57:21Z
2019-01-03T02:57:21Z
2019-01-03T02:57:23Z
DTA Followups - remove redundant methods
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 517c80619baea..3ca660b906f73 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -19,12 +19,11 @@ from pandas.util._validators import validate_fillna_kwargs from pandas.core.dtypes.common import ( - is_bool_dtype, is_categorical_dtype, is_datetime64_any_dtype, - is_datetime64_dtype, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, - is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer_dtype, - is_list_like, is_object_dtype, is_offsetlike, is_period_dtype, - is_string_dtype, is_timedelta64_dtype, is_unsigned_integer_dtype, - needs_i8_conversion, pandas_dtype) + is_categorical_dtype, is_datetime64_any_dtype, is_datetime64_dtype, + is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, + is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_list_like, + is_object_dtype, is_offsetlike, is_period_dtype, is_string_dtype, + is_timedelta64_dtype, is_unsigned_integer_dtype, pandas_dtype) from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries from pandas.core.dtypes.inference import is_array_like from pandas.core.dtypes.missing import isna @@ -40,32 +39,6 @@ from .base import ExtensionArray, ExtensionOpsMixin -def _make_comparison_op(cls, op): - # TODO: share code with indexes.base version? Main difference is that - # the block for MultiIndex was removed here. 
- def cmp_method(self, other): - if isinstance(other, ABCDataFrame): - return NotImplemented - - if needs_i8_conversion(self) and needs_i8_conversion(other): - # we may need to directly compare underlying - # representations - return self._evaluate_compare(other, op) - - # numpy will show a DeprecationWarning on invalid elementwise - # comparisons, this will raise in the future - with warnings.catch_warnings(record=True): - warnings.filterwarnings("ignore", "elementwise", FutureWarning) - with np.errstate(all='ignore'): - result = op(self._data, np.asarray(other)) - - return result - - name = '__{name}__'.format(name=op.__name__) - # TODO: docstring? - return compat.set_function_name(cmp_method, name, cls) - - class AttributesMixin(object): @property @@ -1358,41 +1331,6 @@ def __isub__(self, other): # -------------------------------------------------------------- # Comparison Methods - # Called by _add_comparison_methods defined in ExtensionOpsMixin - _create_comparison_method = classmethod(_make_comparison_op) - - def _evaluate_compare(self, other, op): - """ - We have been called because a comparison between - 8 aware arrays. 
numpy will warn about NaT comparisons - """ - # Called by comparison methods when comparing datetimelike - # with datetimelike - - if not isinstance(other, type(self)): - # coerce to a similar object - if not is_list_like(other): - # scalar - other = [other] - elif lib.is_scalar(lib.item_from_zerodim(other)): - # ndarray scalar - other = [other.item()] - other = type(self)._from_sequence(other) - - # compare - result = op(self.asi8, other.asi8) - - # technically we could support bool dtyped Index - # for now just return the indexing array directly - mask = (self._isnan) | (other._isnan) - - filler = iNaT - if is_bool_dtype(result): - filler = False - - result[mask] = filler - return result - def _ensure_localized(self, arg, ambiguous='raise', nonexistent='raise', from_utc=False): """ @@ -1493,9 +1431,6 @@ def max(self, axis=None, skipna=True, *args, **kwargs): return self._box_func(result) -DatetimeLikeArrayMixin._add_comparison_ops() - - # ------------------------------------------------------------------- # Shared Constructor Helpers diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index ea2742c5808a3..f5903e19d2c45 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -34,7 +34,7 @@ _midnight = time(0, 0) -def _to_m8(key, tz=None): +def _to_M8(key, tz=None): """ Timestamp-like => dt64 """ @@ -96,7 +96,6 @@ def _dt_array_cmp(cls, op): nat_result = True if opname == '__ne__' else False def wrapper(self, other): - meth = getattr(dtl.DatetimeLikeArrayMixin, opname) # TODO: return NotImplemented for Series / Index and let pandas unbox # Right now, returning NotImplemented for Index fails because we # go into the index implementation, which may be a bug? 
@@ -109,7 +108,7 @@ def wrapper(self, other): self._assert_tzawareness_compat(other) try: - other = _to_m8(other, tz=self.tz) + other = _to_M8(other, tz=self.tz) except ValueError: # string that cannot be parsed to Timestamp return ops.invalid_comparison(self, other, op) @@ -158,7 +157,7 @@ def wrapper(self, other): # or an object-dtype ndarray other = type(self)._from_sequence(other) - result = meth(self, other) + result = op(self.view('i8'), other.view('i8')) o_mask = other._isnan result = com.values_from_object(result) diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index b747e2b6b096b..6a7225acfefbf 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -36,18 +36,6 @@ _BAD_DTYPE = "dtype {dtype} cannot be converted to timedelta64[ns]" -def _to_m8(key): - """ - Timedelta-like => dt64 - """ - if not isinstance(key, Timedelta): - # this also converts strings - key = Timedelta(key) - - # return an type that can be compared - return np.int64(key.value).view(_TD_DTYPE) - - def _is_convertible_to_td(key): return isinstance(key, (Tick, timedelta, np.timedelta64, compat.string_types)) @@ -75,17 +63,15 @@ def _td_array_cmp(cls, op): opname = '__{name}__'.format(name=op.__name__) nat_result = True if opname == '__ne__' else False - meth = getattr(dtl.DatetimeLikeArrayMixin, opname) - def wrapper(self, other): if _is_convertible_to_td(other) or other is NaT: try: - other = _to_m8(other) + other = Timedelta(other) except ValueError: # failed to parse as timedelta return ops.invalid_comparison(self, other, op) - result = meth(self, other) + result = op(self.view('i8'), other.value) if isna(other): result.fill(nat_result) @@ -101,7 +87,7 @@ def wrapper(self, other): except (ValueError, TypeError): return ops.invalid_comparison(self, other, op) - result = meth(self, other) + result = op(self.view('i8'), other.view('i8')) result = com.values_from_object(result) o_mask = np.array(isna(other)) diff --git 
a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 5547266ea6bab..cfca5d1b7d2cc 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -13,8 +13,8 @@ from pandas.util._decorators import Appender, cache_readonly, deprecate_kwarg from pandas.core.dtypes.common import ( - ensure_int64, is_bool_dtype, is_dtype_equal, is_float, is_integer, - is_list_like, is_period_dtype, is_scalar) + ensure_int64, is_dtype_equal, is_float, is_integer, is_list_like, + is_period_dtype, is_scalar) from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries from pandas.core import algorithms, ops @@ -191,16 +191,6 @@ def wrapper(left, right): return wrapper - @Appender(DatetimeLikeArrayMixin._evaluate_compare.__doc__) - def _evaluate_compare(self, other, op): - result = self._eadata._evaluate_compare(other, op) - if is_bool_dtype(result): - return result - try: - return Index(result) - except TypeError: - return result - def _ensure_localized(self, arg, ambiguous='raise', nonexistent='raise', from_utc=False): # See DatetimeLikeArrayMixin._ensure_localized.__doc__ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 6d9829d4ef659..7d901f4656731 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -22,7 +22,7 @@ from pandas.core.accessor import delegate_names from pandas.core.arrays.datetimes import ( - DatetimeArrayMixin as DatetimeArray, _to_m8, validate_tz_from_dtype) + DatetimeArrayMixin as DatetimeArray, _to_M8, validate_tz_from_dtype) from pandas.core.base import _shared_docs import pandas.core.common as com from pandas.core.indexes.base import Index @@ -405,7 +405,7 @@ def __setstate__(self, state): def _convert_for_op(self, value): """ Convert value to be insertable to ndarray """ if self._has_same_tz(value): - return _to_m8(value) + return _to_M8(value) raise ValueError('Passed item and index have different timezone') def 
_maybe_update_attributes(self, attrs): @@ -1161,7 +1161,7 @@ def searchsorted(self, value, side='left', sorter=None): if isinstance(value, (np.ndarray, Index)): value = np.array(value, dtype=_NS_DTYPE, copy=False) else: - value = _to_m8(value, tz=self.tz) + value = _to_M8(value, tz=self.tz) return self.values.searchsorted(value, side=side) @@ -1211,7 +1211,7 @@ def insert(self, loc, item): freq = self.freq elif (loc == len(self)) and item - self.freq == self[-1]: freq = self.freq - item = _to_m8(item, tz=self.tz) + item = _to_M8(item, tz=self.tz) try: new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)], diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 0eeb7551db26f..b59c32bb8a9d4 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -276,9 +276,6 @@ def _simple_new(cls, values, name=None, freq=None, **kwargs): result._reset_identity() return result - # ------------------------------------------------------------------------ - # Wrapping PeriodArray - # ------------------------------------------------------------------------ # Data @@ -416,6 +413,10 @@ def _mpl_repr(self): # how to represent ourselves to matplotlib return self.astype(object).values + @property + def _formatter_func(self): + return self.array._formatter(boxed=False) + # ------------------------------------------------------------------------ # Indexing @@ -496,10 +497,6 @@ def __array_wrap__(self, result, context=None): # cannot pass _simple_new as it is return type(self)(result, freq=self.freq, name=self.name) - @property - def _formatter_func(self): - return self.array._formatter(boxed=False) - def asof_locs(self, where, mask): """ where : array of timestamps diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 241d12dd06159..5e8e6a423ab3f 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -18,7 +18,7 @@ from pandas.core.accessor import delegate_names 
from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays.timedeltas import ( - TimedeltaArrayMixin as TimedeltaArray, _is_convertible_to_td, _to_m8) + TimedeltaArrayMixin as TimedeltaArray, _is_convertible_to_td) from pandas.core.base import _shared_docs import pandas.core.common as com from pandas.core.indexes.base import Index, _index_shared_docs @@ -614,7 +614,7 @@ def searchsorted(self, value, side='left', sorter=None): if isinstance(value, (np.ndarray, Index)): value = np.array(value, dtype=_TD_DTYPE, copy=False) else: - value = _to_m8(value) + value = Timedelta(value).asm8.view(_TD_DTYPE) return self.values.searchsorted(value, side=side, sorter=sorter) @@ -664,7 +664,7 @@ def insert(self, loc, item): freq = self.freq elif (loc == len(self)) and item - self.freq == self[-1]: freq = self.freq - item = _to_m8(item) + item = Timedelta(item).asm8.view(_TD_DTYPE) try: new_tds = np.concatenate((self[:loc].asi8, [item.view(np.int64)], diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index d4e82fe2659a0..f5c4808a09123 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -20,7 +20,7 @@ from pandas._libs.tslibs.conversion import localize_pydatetime from pandas._libs.tslibs.offsets import shift_months -from pandas.core.indexes.datetimes import _to_m8 +from pandas.core.indexes.datetimes import _to_M8 from pandas import ( Timestamp, Timedelta, Period, Series, date_range, NaT, @@ -349,7 +349,7 @@ class TestDatetimeIndexComparisons(object): def test_comparators(self, op): index = tm.makeDateIndex(100) element = index[len(index) // 2] - element = _to_m8(element) + element = _to_M8(element) arr = np.array(index) arr_result = op(arr, element) diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index dbdbb0bc238a9..f60d73ea1b05b 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -400,98 +400,98 @@ def 
test_value_counts_unique_nunique(self): assert o.nunique() == len(np.unique(o.values)) - def test_value_counts_unique_nunique_null(self): + @pytest.mark.parametrize('null_obj', [np.nan, None]) + def test_value_counts_unique_nunique_null(self, null_obj): - for null_obj in [np.nan, None]: - for orig in self.objs: - o = orig.copy() - klass = type(o) - values = o._ndarray_values - - if not self._allow_na_ops(o): - continue + for orig in self.objs: + o = orig.copy() + klass = type(o) + values = o._ndarray_values - # special assign to the numpy array - if is_datetime64tz_dtype(o): - if isinstance(o, DatetimeIndex): - v = o.asi8 - v[0:2] = iNaT - values = o._shallow_copy(v) - else: - o = o.copy() - o[0:2] = iNaT - values = o._values + if not self._allow_na_ops(o): + continue - elif needs_i8_conversion(o): - values[0:2] = iNaT - values = o._shallow_copy(values) + # special assign to the numpy array + if is_datetime64tz_dtype(o): + if isinstance(o, DatetimeIndex): + v = o.asi8 + v[0:2] = iNaT + values = o._shallow_copy(v) else: - values[0:2] = null_obj - # check values has the same dtype as the original + o = o.copy() + o[0:2] = iNaT + values = o._values - assert values.dtype == o.dtype + elif needs_i8_conversion(o): + values[0:2] = iNaT + values = o._shallow_copy(values) + else: + values[0:2] = null_obj + # check values has the same dtype as the original - # create repeated values, 'n'th element is repeated by n+1 - # times - if isinstance(o, (DatetimeIndex, PeriodIndex)): - expected_index = o.copy() - expected_index.name = None + assert values.dtype == o.dtype - # attach name to klass - o = klass(values.repeat(range(1, len(o) + 1))) - o.name = 'a' - else: - if isinstance(o, DatetimeIndex): - expected_index = orig._values._shallow_copy(values) - else: - expected_index = Index(values) - expected_index.name = None - o = o.repeat(range(1, len(o) + 1)) - o.name = 'a' + # create repeated values, 'n'th element is repeated by n+1 + # times + if isinstance(o, (DatetimeIndex, 
PeriodIndex)): + expected_index = o.copy() + expected_index.name = None - # check values has the same dtype as the original - assert o.dtype == orig.dtype - # check values correctly have NaN - nanloc = np.zeros(len(o), dtype=np.bool) - nanloc[:3] = True - if isinstance(o, Index): - tm.assert_numpy_array_equal(pd.isna(o), nanloc) - else: - exp = Series(nanloc, o.index, name='a') - tm.assert_series_equal(pd.isna(o), exp) - - expected_s_na = Series(list(range(10, 2, -1)) + [3], - index=expected_index[9:0:-1], - dtype='int64', name='a') - expected_s = Series(list(range(10, 2, -1)), - index=expected_index[9:1:-1], - dtype='int64', name='a') - - result_s_na = o.value_counts(dropna=False) - tm.assert_series_equal(result_s_na, expected_s_na) - assert result_s_na.index.name is None - assert result_s_na.name == 'a' - result_s = o.value_counts() - tm.assert_series_equal(o.value_counts(), expected_s) - assert result_s.index.name is None - assert result_s.name == 'a' - - result = o.unique() - if isinstance(o, Index): - tm.assert_index_equal(result, - Index(values[1:], name='a')) - elif is_datetime64tz_dtype(o): - # unable to compare NaT / nan - tm.assert_extension_array_equal(result[1:], values[2:]) - assert result[0] is pd.NaT + # attach name to klass + o = klass(values.repeat(range(1, len(o) + 1))) + o.name = 'a' + else: + if isinstance(o, DatetimeIndex): + expected_index = orig._values._shallow_copy(values) else: - tm.assert_numpy_array_equal(result[1:], values[2:]) + expected_index = Index(values) + expected_index.name = None + o = o.repeat(range(1, len(o) + 1)) + o.name = 'a' + + # check values has the same dtype as the original + assert o.dtype == orig.dtype + # check values correctly have NaN + nanloc = np.zeros(len(o), dtype=np.bool) + nanloc[:3] = True + if isinstance(o, Index): + tm.assert_numpy_array_equal(pd.isna(o), nanloc) + else: + exp = Series(nanloc, o.index, name='a') + tm.assert_series_equal(pd.isna(o), exp) + + expected_s_na = Series(list(range(10, 2, -1)) + 
[3], + index=expected_index[9:0:-1], + dtype='int64', name='a') + expected_s = Series(list(range(10, 2, -1)), + index=expected_index[9:1:-1], + dtype='int64', name='a') + + result_s_na = o.value_counts(dropna=False) + tm.assert_series_equal(result_s_na, expected_s_na) + assert result_s_na.index.name is None + assert result_s_na.name == 'a' + result_s = o.value_counts() + tm.assert_series_equal(o.value_counts(), expected_s) + assert result_s.index.name is None + assert result_s.name == 'a' + + result = o.unique() + if isinstance(o, Index): + tm.assert_index_equal(result, + Index(values[1:], name='a')) + elif is_datetime64tz_dtype(o): + # unable to compare NaT / nan + tm.assert_extension_array_equal(result[1:], values[2:]) + assert result[0] is pd.NaT + else: + tm.assert_numpy_array_equal(result[1:], values[2:]) - assert pd.isna(result[0]) - assert result.dtype == orig.dtype + assert pd.isna(result[0]) + assert result.dtype == orig.dtype - assert o.nunique() == 8 - assert o.nunique(dropna=False) == 9 + assert o.nunique() == 8 + assert o.nunique(dropna=False) == 9 @pytest.mark.parametrize('klass', [Index, Series]) def test_value_counts_inferred(self, klass): diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index a938c1fe9a8fe..ac3955970587f 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -13,7 +13,7 @@ from pandas.compat import range from pandas.compat.numpy import np_datetime64_compat -from pandas.core.indexes.datetimes import DatetimeIndex, _to_m8, date_range +from pandas.core.indexes.datetimes import DatetimeIndex, _to_M8, date_range from pandas.core.series import Series import pandas.util.testing as tm @@ -47,9 +47,9 @@ class WeekDay(object): #### -def test_to_m8(): +def test_to_M8(): valb = datetime(2007, 10, 1) - valu = _to_m8(valb) + valu = _to_M8(valb) assert isinstance(valu, np.datetime64) diff --git a/pandas/util/testing.py 
b/pandas/util/testing.py index a6ba62bbdea1e..ebdfde2da24f8 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1861,10 +1861,6 @@ def getCols(k): return string.ascii_uppercase[:k] -def getArangeMat(): - return np.arange(N * K).reshape((N, K)) - - # make index def makeStringIndex(k=10, name=None): return Index(rands_array(nchars=10, size=k), name=name) @@ -2322,13 +2318,6 @@ def add_nans(panel): return panel -def add_nans_panel4d(panel4d): - for l, label in enumerate(panel4d.labels): - panel = panel4d[label] - add_nans(panel) - return panel4d - - class TestSubDict(dict): def __init__(self, *args, **kwargs):
Both arrays.datetimes and arrays.timedeltas have a `_to_m8` function. The timedeltas one is removed since it is unnecessary, the datetimes one is given a more accurate name `_to_M8` A couple of unused funcs from `tm` are removed. A test is parametrized.
https://api.github.com/repos/pandas-dev/pandas/pulls/24577
2019-01-03T01:23:36Z
2019-01-03T02:25:08Z
2019-01-03T02:25:07Z
2019-01-03T02:31:39Z
REF: split pd.util.testing
diff --git a/pandas/tests/indexing/multiindex/conftest.py b/pandas/tests/indexing/multiindex/conftest.py index 046fc19c0d9c8..f4cff9876d311 100644 --- a/pandas/tests/indexing/multiindex/conftest.py +++ b/pandas/tests/indexing/multiindex/conftest.py @@ -22,6 +22,7 @@ def multiindex_year_month_day_dataframe_random_data(): """DataFrame with 3 level MultiIndex (year, month, day) covering first 100 business days from 2000-01-01 with random data""" tm.N = 100 + tm.strategies.N = 100 tdf = tm.makeTimeDataFrame() ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum() diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 7a28f05514dd5..be2cef343055a 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -316,8 +316,10 @@ def test_business_freq(self): def test_business_freq_convert(self): n = tm.N tm.N = 300 + tm.strategies.N = 300 bts = tm.makeTimeSeries().asfreq('BM') tm.N = n + tm.strategies.N = n ts = bts.to_period('M') _, ax = self.plt.subplots() bts.plot(ax=ax) diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index 8ee1e49f01ac1..55cd8aedc1b3a 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -16,8 +16,6 @@ import pandas.util.testing as tm from pandas.util.testing import assert_frame_equal -a_ = np.array - @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") class TestJoin(object): @@ -45,8 +43,8 @@ def setup_method(self, method): index=data['C']) def test_cython_left_outer_join(self): - left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64) - right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64) + left = np.array([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64) + right = np.array([1, 1, 0, 4, 2, 2, 1], dtype=np.int64) max_group = 5 ls, rs = libjoin.left_outer_join(left, right, max_group) @@ -54,10 +52,10 @@ def 
test_cython_left_outer_join(self): exp_ls = left.argsort(kind='mergesort') exp_rs = right.argsort(kind='mergesort') - exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, - 6, 6, 7, 7, 8, 8, 9, 10]) - exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, - 4, 5, 4, 5, 4, 5, -1, -1]) + exp_li = np.array([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, + 6, 6, 7, 7, 8, 8, 9, 10]) + exp_ri = np.array([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, + 4, 5, 4, 5, 4, 5, -1, -1]) exp_ls = exp_ls.take(exp_li) exp_ls[exp_li == -1] = -1 @@ -69,8 +67,8 @@ def test_cython_left_outer_join(self): tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False) def test_cython_right_outer_join(self): - left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64) - right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64) + left = np.array([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64) + right = np.array([1, 1, 0, 4, 2, 2, 1], dtype=np.int64) max_group = 5 rs, ls = libjoin.left_outer_join(right, left, max_group) @@ -78,12 +76,12 @@ def test_cython_right_outer_join(self): exp_ls = left.argsort(kind='mergesort') exp_rs = right.argsort(kind='mergesort') - # 0 1 1 1 - exp_li = a_([0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5, - # 2 2 4 - 6, 7, 8, 6, 7, 8, -1]) - exp_ri = a_([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, - 4, 4, 4, 5, 5, 5, 6]) + # 0 1 1 1 + exp_li = np.array([0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5, + # 2 2 4 + 6, 7, 8, 6, 7, 8, -1]) + exp_ri = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, + 4, 4, 4, 5, 5, 5, 6]) exp_ls = exp_ls.take(exp_li) exp_ls[exp_li == -1] = -1 @@ -95,8 +93,8 @@ def test_cython_right_outer_join(self): tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False) def test_cython_inner_join(self): - left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64) - right = a_([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.int64) + left = np.array([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64) + right = np.array([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.int64) max_group = 5 ls, rs = libjoin.inner_join(left, right, max_group) @@ -104,10 
+102,10 @@ def test_cython_inner_join(self): exp_ls = left.argsort(kind='mergesort') exp_rs = right.argsort(kind='mergesort') - exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, - 6, 6, 7, 7, 8, 8]) - exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, - 4, 5, 4, 5, 4, 5]) + exp_li = np.array([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, + 6, 6, 7, 7, 8, 8]) + exp_ri = np.array([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, + 4, 5, 4, 5, 4, 5]) exp_ls = exp_ls.take(exp_li) exp_ls[exp_li == -1] = -1 @@ -700,8 +698,10 @@ def test_panel_join_overlap(self): def test_panel_join_many(self): with catch_warnings(record=True): tm.K = 10 + tm.strategies.K = 10 panel = tm.makePanel() tm.K = 4 + tm.strategies.K = 4 panels = [panel.iloc[:2], panel.iloc[2:6], panel.iloc[6:]] diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py index 90cf6916df0d1..5c348fced8e39 100644 --- a/pandas/tests/series/test_apply.py +++ b/pandas/tests/series/test_apply.py @@ -10,6 +10,8 @@ import pandas.compat as compat from pandas.compat import lrange +from pandas.core.dtypes.common import is_number + import pandas as pd from pandas import DataFrame, Index, Series, isna from pandas.conftest import _get_cython_table_params @@ -368,7 +370,7 @@ def test_agg_cython_table(self, series, func, expected): # test reducing functions in # pandas.core.base.SelectionMixin._cython_table result = series.agg(func) - if tm.is_number(expected): + if is_number(expected): assert np.isclose(result, expected, equal_nan=True) else: assert result == expected diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py index e13cb9edffe2b..8c774e778057a 100644 --- a/pandas/tests/series/test_combine_concat.py +++ b/pandas/tests/series/test_combine_concat.py @@ -90,7 +90,7 @@ def test_combine_first(self): # mixed types index = tm.makeStringIndex(20) - floats = Series(tm.randn(20), index=index) + floats = Series(np.random.randn(20), index=index) strings = 
Series(tm.makeStringIndex(10), index=index[::2]) combined = strings.combine_first(floats) diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py index b4e7708e2456e..03da4e7c8f3ee 100644 --- a/pandas/tests/series/test_repr.py +++ b/pandas/tests/series/test_repr.py @@ -65,8 +65,8 @@ def test_repr(self): str(self.series.astype(int)) str(self.objSeries) - str(Series(tm.randn(1000), index=np.arange(1000))) - str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1))) + str(Series(np.random.randn(1000), index=np.arange(1000))) + str(Series(np.random.randn(1000), index=np.arange(1000, 0, step=-1))) # empty str(self.empty) @@ -89,7 +89,7 @@ def test_repr(self): self.series.name = name repr(self.series) - biggie = Series(tm.randn(1000), index=np.arange(1000), + biggie = Series(np.random.randn(1000), index=np.arange(1000), name=('foo', 'bar', 'baz')) repr(biggie) diff --git a/pandas/util/testing/__init__.py b/pandas/util/testing/__init__.py new file mode 100644 index 0000000000000..b6688a0950992 --- /dev/null +++ b/pandas/util/testing/__init__.py @@ -0,0 +1,1320 @@ +from __future__ import division + +from contextlib import contextmanager +from functools import wraps +import locale +import os +import re +from shutil import rmtree +import string +import subprocess +import sys +import tempfile +import traceback +import warnings + +import numpy as np +from numpy.random import rand + +import pandas.compat as compat +from pandas.compat import ( + PY2, PY3, callable, filter, httplib, lrange, map, range, u, unichr, zip) + +from pandas.core.dtypes.common import ( + is_datetime64_dtype, is_datetime64tz_dtype, is_period_dtype, + is_timedelta64_dtype) + +import pandas as pd +from pandas import Categorical, DataFrame, Index, Series +from pandas.core.arrays import ( + DatetimeArrayMixin as DatetimeArray, PeriodArray, + TimedeltaArrayMixin as TimedeltaArray, period_array) +import pandas.core.common as com + +from pandas.io.common import urlopen + +from 
.asserters import ( # noqa:F401 + assert_almost_equal, assert_attr_equal, assert_categorical_equal, + assert_class_equal, assert_datetime_array_equal, assert_dict_equal, + assert_equal, assert_extension_array_equal, assert_frame_equal, + assert_index_equal, assert_interval_array_equal, + assert_is_valid_plot_return_object, assert_numpy_array_equal, + assert_panel_equal, assert_period_array_equal, assert_produces_warning, + assert_raises_regex, assert_series_equal, assert_sp_array_equal, + assert_sp_frame_equal, assert_sp_series_equal, + assert_timedelta_array_equal, raise_assert_detail) +from .strategies import ( # noqa:F401 + getMixedTypeDict, getPeriodData, getSeriesData, getTimeSeriesData, + makeBoolIndex, makeCategoricalIndex, makeCustomDataframe, makeCustomIndex, + makeDataFrame, makeDateIndex, makeFloatIndex, makeFloatSeries, + makeIntervalIndex, makeIntIndex, makeMissingCustomDataframe, + makeMissingDataframe, makeMixedDataFrame, makeMultiIndex, makeObjectSeries, + makePanel, makePeriodFrame, makePeriodIndex, makePeriodPanel, + makePeriodSeries, makeRangeIndex, makeStringIndex, makeStringSeries, + makeTimeDataFrame, makeTimedeltaIndex, makeTimeSeries, makeUIntIndex, + makeUnicodeIndex) + +N = 30 +K = 4 +_RAISE_NETWORK_ERROR_DEFAULT = False + +# set testing_mode +_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning) + + +def set_testing_mode(): + # set the testing mode filters + testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None') + if 'deprecate' in testing_mode: + warnings.simplefilter('always', _testing_mode_warnings) + + +def reset_testing_mode(): + # reset the testing mode filters + testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None') + if 'deprecate' in testing_mode: + warnings.simplefilter('ignore', _testing_mode_warnings) + + +set_testing_mode() + + +def reset_display_options(): + """ + Reset the display options for printing and representing objects. 
+ """ + + pd.reset_option('^display.', silent=True) + + +def round_trip_pickle(obj, path=None): + """ + Pickle an object and then read it again. + + Parameters + ---------- + obj : pandas object + The object to pickle and then re-read. + path : str, default None + The path where the pickled object is written and then read. + + Returns + ------- + round_trip_pickled_object : pandas object + The original object that was pickled and then re-read. + """ + + if path is None: + path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10))) + with ensure_clean(path) as path: + pd.to_pickle(obj, path) + return pd.read_pickle(path) + + +def round_trip_pathlib(writer, reader, path=None): + """ + Write an object to file specified by a pathlib.Path and read it back + + Parameters + ---------- + writer : callable bound to pandas object + IO writing function (e.g. DataFrame.to_csv ) + reader : callable + IO reading function (e.g. pd.read_csv ) + path : str, default None + The path where the object is written and then read. + + Returns + ------- + round_trip_object : pandas object + The original object that was serialized and then re-read. + """ + + import pytest + Path = pytest.importorskip('pathlib').Path + if path is None: + path = '___pathlib___' + with ensure_clean(path) as path: + writer(Path(path)) + obj = reader(Path(path)) + return obj + + +def round_trip_localpath(writer, reader, path=None): + """ + Write an object to file specified by a py.path LocalPath and read it back + + Parameters + ---------- + writer : callable bound to pandas object + IO writing function (e.g. DataFrame.to_csv ) + reader : callable + IO reading function (e.g. pd.read_csv ) + path : str, default None + The path where the object is written and then read. + + Returns + ------- + round_trip_object : pandas object + The original object that was serialized and then re-read. 
+ """ + import pytest + LocalPath = pytest.importorskip('py.path').local + if path is None: + path = '___localpath___' + with ensure_clean(path) as path: + writer(LocalPath(path)) + obj = reader(LocalPath(path)) + return obj + + +@contextmanager +def decompress_file(path, compression): + """ + Open a compressed file and return a file object + + Parameters + ---------- + path : str + The path where the file is read from + + compression : {'gzip', 'bz2', 'zip', 'xz', None} + Name of the decompression to use + + Returns + ------- + f : file object + """ + + if compression is None: + f = open(path, 'rb') + elif compression == 'gzip': + import gzip + f = gzip.open(path, 'rb') + elif compression == 'bz2': + import bz2 + f = bz2.BZ2File(path, 'rb') + elif compression == 'xz': + lzma = compat.import_lzma() + f = lzma.LZMAFile(path, 'rb') + elif compression == 'zip': + import zipfile + zip_file = zipfile.ZipFile(path) + zip_names = zip_file.namelist() + if len(zip_names) == 1: + f = zip_file.open(zip_names.pop()) + else: + raise ValueError('ZIP file {} error. Only one file per ZIP.' + .format(path)) + else: + msg = 'Unrecognized compression type: {}'.format(compression) + raise ValueError(msg) + + try: + yield f + finally: + f.close() + if compression == "zip": + zip_file.close() + + +def write_to_compressed(compression, path, data, dest="test"): + """ + Write data to a compressed file. + + Parameters + ---------- + compression : {'gzip', 'bz2', 'zip', 'xz'} + The compression type to use. + path : str + The file path to write the data. + data : str + The data to write. + dest : str, default "test" + The destination file (for ZIP only) + + Raises + ------ + ValueError : An invalid compression value was passed in. 
+ """ + + if compression == "zip": + import zipfile + compress_method = zipfile.ZipFile + elif compression == "gzip": + import gzip + compress_method = gzip.GzipFile + elif compression == "bz2": + import bz2 + compress_method = bz2.BZ2File + elif compression == "xz": + lzma = compat.import_lzma() + compress_method = lzma.LZMAFile + else: + msg = "Unrecognized compression type: {}".format(compression) + raise ValueError(msg) + + if compression == "zip": + mode = "w" + args = (dest, data) + method = "writestr" + else: + mode = "wb" + args = (data,) + method = "write" + + with compress_method(path, mode=mode) as f: + getattr(f, method)(*args) + + +def randbool(size=(), p=0.5): + return rand(*size) <= p + + +RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), + dtype=(np.str_, 1)) +RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) + + string.digits), dtype=(np.unicode_, 1)) + + +def rands_array(nchars, size, dtype='O'): + """Generate an array of byte strings.""" + retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size)) + .view((np.str_, nchars)).reshape(size)) + if dtype is None: + return retval + else: + return retval.astype(dtype) + + +def randu_array(nchars, size, dtype='O'): + """Generate an array of unicode strings.""" + retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size)) + .view((np.unicode_, nchars)).reshape(size)) + if dtype is None: + return retval + else: + return retval.astype(dtype) + + +def rands(nchars): + """ + Generate one random byte string. + + See `rands_array` if you want to create an array of random strings. + + """ + return ''.join(np.random.choice(RANDS_CHARS, nchars)) + + +def randu(nchars): + """ + Generate one random unicode string. + + See `randu_array` if you want to create an array of random unicode strings. 
+ + """ + return ''.join(np.random.choice(RANDU_CHARS, nchars)) + + +def close(fignum=None): + from matplotlib.pyplot import get_fignums, close as _close + + if fignum is None: + for fignum in get_fignums(): + _close(fignum) + else: + _close(fignum) + + +# ----------------------------------------------------------------------------- +# locale utilities + + +def check_output(*popenargs, **kwargs): + # shamelessly taken from Python 2.7 source + r"""Run command with arguments and return its output as a byte string. + + If the exit code was non-zero it raises a CalledProcessError. The + CalledProcessError object will have the return code in the returncode + attribute and output in the output attribute. + + The arguments are the same as for the Popen constructor. Example: + + >>> check_output(["ls", "-l", "/dev/null"]) + 'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n' + + The stdout argument is not allowed as it is used internally. + To capture standard error in the result, use stderr=STDOUT. + + >>> check_output(["/bin/sh", "-c", + ... "ls -l non_existent_file ; exit 0"], + ... 
stderr=STDOUT) + 'ls: non_existent_file: No such file or directory\n' + """ + if 'stdout' in kwargs: + raise ValueError('stdout argument not allowed, it will be overridden.') + process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE, + *popenargs, **kwargs) + output, unused_err = process.communicate() + retcode = process.poll() + if retcode: + cmd = kwargs.get("args") + if cmd is None: + cmd = popenargs[0] + raise subprocess.CalledProcessError(retcode, cmd, output=output) + return output + + +def _default_locale_getter(): + try: + raw_locales = check_output(['locale -a'], shell=True) + except subprocess.CalledProcessError as e: + raise type(e)("{exception}, the 'locale -a' command cannot be found " + "on your system".format(exception=e)) + return raw_locales + + +def get_locales(prefix=None, normalize=True, + locale_getter=_default_locale_getter): + """Get all the locales that are available on the system. + + Parameters + ---------- + prefix : str + If not ``None`` then return only those locales with the prefix + provided. For example to get all English language locales (those that + start with ``"en"``), pass ``prefix="en"``. + normalize : bool + Call ``locale.normalize`` on the resulting list of available locales. + If ``True``, only locales that can be set without throwing an + ``Exception`` are returned. + locale_getter : callable + The function to use to retrieve the current locales. This should return + a string with each locale separated by a newline character. + + Returns + ------- + locales : list of strings + A list of locale strings that can be set with ``locale.setlocale()``. + For example:: + + locale.setlocale(locale.LC_ALL, locale_string) + + On error will return None (no locale available, e.g. Windows) + + """ + try: + raw_locales = locale_getter() + except Exception: + return None + + try: + # raw_locales is "\n" separated list of locales + # it may contain non-decodable parts, so split + # extract what we can and then rejoin. 
+ raw_locales = raw_locales.split(b'\n') + out_locales = [] + for x in raw_locales: + if PY3: + out_locales.append(str( + x, encoding=pd.options.display.encoding)) + else: + out_locales.append(str(x)) + + except TypeError: + pass + + if prefix is None: + return _valid_locales(out_locales, normalize) + + pattern = re.compile('{prefix}.*'.format(prefix=prefix)) + found = pattern.findall('\n'.join(out_locales)) + return _valid_locales(found, normalize) + + +@contextmanager +def set_locale(new_locale, lc_var=locale.LC_ALL): + """Context manager for temporarily setting a locale. + + Parameters + ---------- + new_locale : str or tuple + A string of the form <language_country>.<encoding>. For example to set + the current locale to US English with a UTF8 encoding, you would pass + "en_US.UTF-8". + lc_var : int, default `locale.LC_ALL` + The category of the locale being set. + + Notes + ----- + This is useful when you want to run a particular block of code under a + particular locale, without globally setting the locale. This probably isn't + thread-safe. + """ + current_locale = locale.getlocale() + + try: + locale.setlocale(lc_var, new_locale) + normalized_locale = locale.getlocale() + if com._all_not_none(*normalized_locale): + yield '.'.join(normalized_locale) + else: + yield new_locale + finally: + locale.setlocale(lc_var, current_locale) + + +def can_set_locale(lc, lc_var=locale.LC_ALL): + """ + Check to see if we can set a locale, and subsequently get the locale, + without raising an Exception. + + Parameters + ---------- + lc : str + The locale to attempt to set. + lc_var : int, default `locale.LC_ALL` + The category of the locale being set. 
+ + Returns + ------- + is_valid : bool + Whether the passed locale can be set + """ + + try: + with set_locale(lc, lc_var=lc_var): + pass + except (ValueError, + locale.Error): # horrible name for a Exception subclass + return False + else: + return True + + +def _valid_locales(locales, normalize): + """Return a list of normalized locales that do not throw an ``Exception`` + when set. + + Parameters + ---------- + locales : str + A string where each locale is separated by a newline. + normalize : bool + Whether to call ``locale.normalize`` on each locale. + + Returns + ------- + valid_locales : list + A list of valid locales. + """ + if normalize: + normalizer = lambda x: locale.normalize(x.strip()) + else: + normalizer = lambda x: x.strip() + + return list(filter(can_set_locale, map(normalizer, locales))) + +# ----------------------------------------------------------------------------- +# Stdout / stderr decorators + + +@contextmanager +def set_defaultencoding(encoding): + """ + Set default encoding (as given by sys.getdefaultencoding()) to the given + encoding; restore on exit. 
+ + Parameters + ---------- + encoding : str + """ + if not PY2: + raise ValueError("set_defaultencoding context is only available " + "in Python 2.") + orig = sys.getdefaultencoding() + reload(sys) # noqa:F821 + sys.setdefaultencoding(encoding) + try: + yield + finally: + sys.setdefaultencoding(orig) + + +# ----------------------------------------------------------------------------- +# Console debugging tools + + +def debug(f, *args, **kwargs): + from pdb import Pdb as OldPdb + try: + from IPython.core.debugger import Pdb + kw = dict(color_scheme='Linux') + except ImportError: + Pdb = OldPdb + kw = {} + pdb = Pdb(**kw) + return pdb.runcall(f, *args, **kwargs) + + +def pudebug(f, *args, **kwargs): + import pudb + return pudb.runcall(f, *args, **kwargs) + + +def set_trace(): + from IPython.core.debugger import Pdb + try: + Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back) + except Exception: + from pdb import Pdb as OldPdb + OldPdb().set_trace(sys._getframe().f_back) + +# ----------------------------------------------------------------------------- +# contextmanager to ensure the file cleanup + + +@contextmanager +def ensure_clean(filename=None, return_filelike=False): + """Gets a temporary path and agrees to remove on close. + + Parameters + ---------- + filename : str (optional) + if None, creates a temporary file which is then removed when out of + scope. if passed, creates temporary file with filename as ending. + return_filelike : bool (default False) + if True, returns a file-like which is *always* cleaned. Necessary for + savefig and other functions which want to append extensions. 
+ """ + filename = filename or '' + fd = None + + if return_filelike: + f = tempfile.TemporaryFile(suffix=filename) + try: + yield f + finally: + f.close() + else: + # don't generate tempfile if using a path with directory specified + if len(os.path.dirname(filename)): + raise ValueError("Can't pass a qualified name to ensure_clean()") + + try: + fd, filename = tempfile.mkstemp(suffix=filename) + except UnicodeEncodeError: + import pytest + pytest.skip('no unicode file names on this system') + + try: + yield filename + finally: + try: + os.close(fd) + except Exception: + print("Couldn't close file descriptor: {fdesc} (file: {fname})" + .format(fdesc=fd, fname=filename)) + try: + if os.path.exists(filename): + os.remove(filename) + except Exception as e: + print("Exception on removing file: {error}".format(error=e)) + + +@contextmanager +def ensure_clean_dir(): + """ + Get a temporary directory path and agrees to remove on close. + + Yields + ------ + Temporary directory path + """ + directory_name = tempfile.mkdtemp(suffix='') + try: + yield directory_name + finally: + try: + rmtree(directory_name) + except Exception: + pass + + +@contextmanager +def ensure_safe_environment_variables(): + """ + Get a context manager to safely set environment variables + + All changes will be undone on close, hence environment variables set + within this contextmanager will neither persist nor change global state. + """ + saved_environ = dict(os.environ) + try: + yield + finally: + os.environ.clear() + os.environ.update(saved_environ) + + +# ----------------------------------------------------------------------------- +# Comparators + + +def equalContents(arr1, arr2): + """Checks if the set of unique elements of arr1 and arr2 are equivalent. 
+ """ + return frozenset(arr1) == frozenset(arr2) + + +def isiterable(obj): + return hasattr(obj, '__iter__') + + +def is_sorted(seq): + if isinstance(seq, (Index, Series)): + seq = seq.values + # sorting does not change precisions + return assert_numpy_array_equal(seq, np.sort(np.array(seq))) + + +def box_expected(expected, box_cls, transpose=True): + """ + Helper function to wrap the expected output of a test in a given box_class. + + Parameters + ---------- + expected : np.ndarray, Index, Series + box_cls : {Index, Series, DataFrame} + + Returns + ------- + subclass of box_cls + """ + if box_cls is pd.Index: + expected = pd.Index(expected) + elif box_cls is pd.Series: + expected = pd.Series(expected) + elif box_cls is pd.DataFrame: + expected = pd.Series(expected).to_frame() + if transpose: + # for vector operations, we we need a DataFrame to be a single-row, + # not a single-column, in order to operate against non-DataFrame + # vectors of the same length. + expected = expected.T + elif box_cls is PeriodArray: + # the PeriodArray constructor is not as flexible as period_array + expected = period_array(expected) + elif box_cls is DatetimeArray: + expected = DatetimeArray(expected) + elif box_cls is TimedeltaArray: + expected = TimedeltaArray(expected) + elif box_cls is np.ndarray: + expected = np.array(expected) + elif box_cls is to_array: + expected = to_array(expected) + else: + raise NotImplementedError(box_cls) + return expected + + +def to_array(obj): + # temporary implementation until we get pd.array in place + if is_period_dtype(obj): + return period_array(obj) + elif is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj): + return DatetimeArray._from_sequence(obj) + elif is_timedelta64_dtype(obj): + return TimedeltaArray._from_sequence(obj) + else: + return np.array(obj) + + +# ----------------------------------------------------------------------------- +# Others + + +def assert_contains_all(iterable, dic): + for k in iterable: + assert k in dic, "Did 
not contain item: '{key!r}'".format(key=k) + + +def assert_copy(iter1, iter2, **eql_kwargs): + """ + iter1, iter2: iterables that produce elements + comparable with assert_almost_equal + + Checks that the elements are equal, but not + the same object. (Does not check that items + in sequences are also not the same object) + """ + for elem1, elem2 in zip(iter1, iter2): + assert_almost_equal(elem1, elem2, **eql_kwargs) + msg = ("Expected object {obj1!r} and object {obj2!r} to be " + "different objects, but they were the same object." + ).format(obj1=type(elem1), obj2=type(elem2)) + assert elem1 is not elem2, msg + + +def getArangeMat(): + return np.arange(N * K).reshape((N, K)) + + +def all_index_generator(k=10): + """Generator which can be iterated over to get instances of all the various + index classes. + + Parameters + ---------- + k: length of each of the index instances + """ + all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex, + makeUnicodeIndex, makeDateIndex, makePeriodIndex, + makeTimedeltaIndex, makeBoolIndex, makeRangeIndex, + makeIntervalIndex, + makeCategoricalIndex] + for make_index_func in all_make_index_funcs: + yield make_index_func(k=k) + + +def index_subclass_makers_generator(): + make_index_funcs = [ + makeDateIndex, makePeriodIndex, + makeTimedeltaIndex, makeRangeIndex, + makeIntervalIndex, makeCategoricalIndex, + makeMultiIndex + ] + for make_index_func in make_index_funcs: + yield make_index_func + + +def all_timeseries_index_generator(k=10): + """Generator which can be iterated over to get instances of all the classes + which represent time-seires. 
+ + Parameters + ---------- + k: length of each of the index instances + """ + make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex] + for make_index_func in make_index_funcs: + yield make_index_func(k=k) + + +def add_nans(panel): + I, J, N = panel.shape + for i, item in enumerate(panel.items): + dm = panel[item] + for j, col in enumerate(dm.columns): + dm[col][:i + j] = np.NaN + return panel + + +def add_nans_panel4d(panel4d): + for l, label in enumerate(panel4d.labels): + panel = panel4d[label] + add_nans(panel) + return panel4d + + +class TestSubDict(dict): + + def __init__(self, *args, **kwargs): + dict.__init__(self, *args, **kwargs) + + +def optional_args(decorator): + """allows a decorator to take optional positional and keyword arguments. + Assumes that taking a single, callable, positional argument means that + it is decorating a function, i.e. something like this:: + + @my_decorator + def function(): pass + + Calls decorator with decorator(f, *args, **kwargs)""" + + @wraps(decorator) + def wrapper(*args, **kwargs): + def dec(f): + return decorator(f, *args, **kwargs) + + is_decorating = not kwargs and len(args) == 1 and callable(args[0]) + if is_decorating: + f = args[0] + args = [] + return dec(f) + else: + return dec + + return wrapper + + +# skip tests on exceptions with this message +_network_error_messages = ( + # 'urlopen error timed out', + # 'timeout: timed out', + # 'socket.timeout: timed out', + 'timed out', + 'Server Hangup', + 'HTTP Error 503: Service Unavailable', + '502: Proxy Error', + 'HTTP Error 502: internal error', + 'HTTP Error 502', + 'HTTP Error 503', + 'HTTP Error 403', + 'HTTP Error 400', + 'Temporary failure in name resolution', + 'Name or service not known', + 'Connection refused', + 'certificate verify', +) + +# or this e.errno/e.reason.errno +_network_errno_vals = ( + 101, # Network is unreachable + 111, # Connection refused + 110, # Connection timed out + 104, # Connection reset Error + 54, # Connection reset 
by peer + 60, # urllib.error.URLError: [Errno 60] Connection timed out +) + +# Both of the above shouldn't mask real issues such as 404's +# or refused connections (changed DNS). +# But some tests (test_data yahoo) contact incredibly flakey +# servers. + +# and conditionally raise on these exception types +_network_error_classes = (IOError, httplib.HTTPException) + +if PY3: + _network_error_classes += (TimeoutError,) # noqa + + +def can_connect(url, error_classes=_network_error_classes): + """Try to connect to the given url. True if succeeds, False if IOError + raised + + Parameters + ---------- + url : basestring + The URL to try to connect to + + Returns + ------- + connectable : bool + Return True if no IOError (unable to connect) or URLError (bad url) was + raised + """ + try: + with urlopen(url): + pass + except error_classes: + return False + else: + return True + + +@optional_args +def network(t, url="http://www.google.com", + raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT, + check_before_test=False, + error_classes=_network_error_classes, + skip_errnos=_network_errno_vals, + _skip_on_messages=_network_error_messages, + ): + """ + Label a test as requiring network connection and, if an error is + encountered, only raise if it does not find a network connection. + + In comparison to ``network``, this assumes an added contract to your test: + you must assert that, under normal conditions, your test will ONLY fail if + it does not have network connectivity. + + You can call this in 3 ways: as a standard decorator, with keyword + arguments, or with a positional argument that is the url to check. + + Parameters + ---------- + t : callable + The test requiring network connectivity. + url : path + The url to test via ``pandas.io.common.urlopen`` to check + for connectivity. Defaults to 'http://www.google.com'. + raise_on_error : bool + If True, never catches errors. + check_before_test : bool + If True, checks connectivity before running the test case. 
+ error_classes : tuple or Exception + error classes to ignore. If not in ``error_classes``, raises the error. + defaults to IOError. Be careful about changing the error classes here. + skip_errnos : iterable of int + Any exception that has .errno or .reason.erno set to one + of these values will be skipped with an appropriate + message. + _skip_on_messages: iterable of string + any exception e for which one of the strings is + a substring of str(e) will be skipped with an appropriate + message. Intended to suppress errors where an errno isn't available. + + Notes + ----- + * ``raise_on_error`` supercedes ``check_before_test`` + + Returns + ------- + t : callable + The decorated test ``t``, with checks for connectivity errors. + + Example + ------- + + Tests decorated with @network will fail if it's possible to make a network + connection to another URL (defaults to google.com):: + + >>> from pandas.util.testing import network + >>> from pandas.io.common import urlopen + >>> @network + ... def test_network(): + ... with urlopen("rabbit://bonanza.com"): + ... pass + Traceback + ... + URLError: <urlopen error unknown url type: rabit> + + You can specify alternative URLs:: + + >>> @network("http://www.yahoo.com") + ... def test_something_with_yahoo(): + ... raise IOError("Failure Message") + >>> test_something_with_yahoo() + Traceback (most recent call last): + ... + IOError: Failure Message + + If you set check_before_test, it will check the url first and not run the + test on failure:: + + >>> @network("failing://url.blaher", check_before_test=True) + ... def test_something(): + ... print("I ran!") + ... raise ValueError("Failure") + >>> test_something() + Traceback (most recent call last): + ... + + Errors not related to networking will always be raised. 
+ """ + from pytest import skip + t.network = True + + @compat.wraps(t) + def wrapper(*args, **kwargs): + if check_before_test and not raise_on_error: + if not can_connect(url, error_classes): + skip() + try: + return t(*args, **kwargs) + except Exception as e: + errno = getattr(e, 'errno', None) + if not errno and hasattr(errno, "reason"): + errno = getattr(e.reason, 'errno', None) + + if errno in skip_errnos: + skip("Skipping test due to known errno" + " and error {error}".format(error=e)) + + try: + e_str = traceback.format_exc(e) + except Exception: + e_str = str(e) + + if any(m.lower() in e_str.lower() for m in _skip_on_messages): + skip("Skipping test because exception " + "message is known and error {error}".format(error=e)) + + if not isinstance(e, error_classes): + raise + + if raise_on_error or can_connect(url, error_classes): + raise + else: + skip("Skipping test due to lack of connectivity" + " and error {error}".format(error=e)) + + return wrapper + + +with_connectivity_check = network + + +class RNGContext(object): + """ + Context manager to set the numpy random number generator speed. Returns + to the original value upon exiting the context manager. + + Parameters + ---------- + seed : int + Seed for numpy.random.seed + + Examples + -------- + + with RNGContext(42): + np.random.randn() + """ + + def __init__(self, seed): + self.seed = seed + + def __enter__(self): + + self.start_state = np.random.get_state() + np.random.seed(self.seed) + + def __exit__(self, exc_type, exc_value, traceback): + + np.random.set_state(self.start_state) + + +@contextmanager +def with_csv_dialect(name, **kwargs): + """ + Context manager to temporarily register a CSV dialect for parsing CSV. + + Parameters + ---------- + name : str + The name of the dialect. + kwargs : mapping + The parameters for the dialect. + + Raises + ------ + ValueError : the name of the dialect conflicts with a builtin one. + + See Also + -------- + csv : Python's CSV library. 
+ """ + import csv + _BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"} + + if name in _BUILTIN_DIALECTS: + raise ValueError("Cannot override builtin dialect.") + + csv.register_dialect(name, **kwargs) + yield + csv.unregister_dialect(name) + + +@contextmanager +def use_numexpr(use, min_elements=None): + from pandas.core.computation import expressions as expr + if min_elements is None: + min_elements = expr._MIN_ELEMENTS + + olduse = expr._USE_NUMEXPR + oldmin = expr._MIN_ELEMENTS + expr.set_use_numexpr(use) + expr._MIN_ELEMENTS = min_elements + yield + expr._MIN_ELEMENTS = oldmin + expr.set_use_numexpr(olduse) + + +def test_parallel(num_threads=2, kwargs_list=None): + """Decorator to run the same function multiple times in parallel. + + Parameters + ---------- + num_threads : int, optional + The number of times the function is run in parallel. + kwargs_list : list of dicts, optional + The list of kwargs to update original + function kwargs on different threads. + Notes + ----- + This decorator does not pass the return value of the decorated function. 
+ + Original from scikit-image: + + https://github.com/scikit-image/scikit-image/pull/1519 + + """ + + assert num_threads > 0 + has_kwargs_list = kwargs_list is not None + if has_kwargs_list: + assert len(kwargs_list) == num_threads + import threading + + def wrapper(func): + @wraps(func) + def inner(*args, **kwargs): + if has_kwargs_list: + update_kwargs = lambda i: dict(kwargs, **kwargs_list[i]) + else: + update_kwargs = lambda i: kwargs + threads = [] + for i in range(num_threads): + updated_kwargs = update_kwargs(i) + thread = threading.Thread(target=func, args=args, + kwargs=updated_kwargs) + threads.append(thread) + for thread in threads: + thread.start() + for thread in threads: + thread.join() + return inner + return wrapper + + +class SubclassedSeries(Series): + _metadata = ['testattr', 'name'] + + @property + def _constructor(self): + return SubclassedSeries + + @property + def _constructor_expanddim(self): + return SubclassedDataFrame + + +class SubclassedDataFrame(DataFrame): + _metadata = ['testattr'] + + @property + def _constructor(self): + return SubclassedDataFrame + + @property + def _constructor_sliced(self): + return SubclassedSeries + + +class SubclassedSparseSeries(pd.SparseSeries): + _metadata = ['testattr'] + + @property + def _constructor(self): + return SubclassedSparseSeries + + @property + def _constructor_expanddim(self): + return SubclassedSparseDataFrame + + +class SubclassedSparseDataFrame(pd.SparseDataFrame): + _metadata = ['testattr'] + + @property + def _constructor(self): + return SubclassedSparseDataFrame + + @property + def _constructor_sliced(self): + return SubclassedSparseSeries + + +class SubclassedCategorical(Categorical): + + @property + def _constructor(self): + return SubclassedCategorical + + +@contextmanager +def set_timezone(tz): + """Context manager for temporarily setting a timezone. + + Parameters + ---------- + tz : str + A string representing a valid timezone. 
+ + Examples + -------- + + >>> from datetime import datetime + >>> from dateutil.tz import tzlocal + >>> tzlocal().tzname(datetime.now()) + 'IST' + + >>> with set_timezone('US/Eastern'): + ... tzlocal().tzname(datetime.now()) + ... + 'EDT' + """ + + import os + import time + + def setTZ(tz): + if tz is None: + try: + del os.environ['TZ'] + except KeyError: + pass + else: + os.environ['TZ'] = tz + time.tzset() + + orig_tz = os.environ.get('TZ') + setTZ(tz) + try: + yield + finally: + setTZ(orig_tz) + + +def _make_skipna_wrapper(alternative, skipna_alternative=None): + """Create a function for calling on an array. + + Parameters + ---------- + alternative : function + The function to be called on the array with no NaNs. + Only used when 'skipna_alternative' is None. + skipna_alternative : function + The function to be called on the original array + + Returns + ------- + skipna_wrapper : function + """ + if skipna_alternative: + def skipna_wrapper(x): + return skipna_alternative(x.values) + else: + def skipna_wrapper(x): + nona = x.dropna() + if len(nona) == 0: + return np.nan + return alternative(nona) + + return skipna_wrapper + + +def convert_rows_list_to_csv_str(rows_list): + """ + Convert list of CSV rows to single CSV-formatted string for current OS. + + This method is used for creating expected value of to_csv() method. + + Parameters + ---------- + rows_list : list + The list of string. Each element represents the row of csv. 
+ + Returns + ------- + expected : string + Expected output of to_csv() in current OS + """ + sep = os.linesep + expected = sep.join(rows_list) + sep + return expected diff --git a/pandas/util/testing.py b/pandas/util/testing/asserters.py similarity index 51% rename from pandas/util/testing.py rename to pandas/util/testing/asserters.py index a6ba62bbdea1e..1e108a272a91b 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing/asserters.py @@ -1,259 +1,89 @@ -from __future__ import division - from contextlib import contextmanager -from datetime import datetime -from functools import wraps -import locale -import os import re -from shutil import rmtree -import string -import subprocess -import sys -import tempfile -import traceback import warnings import numpy as np -from numpy.random import rand, randn from pandas._libs import testing as _testing import pandas.compat as compat -from pandas.compat import ( - PY2, PY3, Counter, callable, filter, httplib, lmap, lrange, lzip, map, - raise_with_traceback, range, string_types, u, unichr, zip) +from pandas.compat import PY2, raise_with_traceback, range, string_types, zip from pandas.core.dtypes.common import ( - is_bool, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype, + is_bool, is_categorical_dtype, is_datetime64tz_dtype, is_datetimelike_v_numeric, is_datetimelike_v_object, is_extension_array_dtype, is_interval_dtype, is_list_like, is_number, - is_period_dtype, is_sequence, is_timedelta64_dtype, needs_i8_conversion) + needs_i8_conversion) from pandas.core.dtypes.missing import array_equivalent import pandas as pd -from pandas import ( - Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index, - IntervalIndex, MultiIndex, Panel, RangeIndex, Series, bdate_range) +from pandas import Categorical, DataFrame, Index, Series from pandas.core.algorithms import take_1d from pandas.core.arrays import ( DatetimeArrayMixin as DatetimeArray, ExtensionArray, IntervalArray, - PeriodArray, TimedeltaArrayMixin 
as TimedeltaArray, period_array) -import pandas.core.common as com + PeriodArray, TimedeltaArrayMixin as TimedeltaArray) -from pandas.io.common import urlopen from pandas.io.formats.printing import pprint_thing -N = 30 -K = 4 -_RAISE_NETWORK_ERROR_DEFAULT = False - -# set testing_mode -_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning) - - -def set_testing_mode(): - # set the testing mode filters - testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None') - if 'deprecate' in testing_mode: - warnings.simplefilter('always', _testing_mode_warnings) - - -def reset_testing_mode(): - # reset the testing mode filters - testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None') - if 'deprecate' in testing_mode: - warnings.simplefilter('ignore', _testing_mode_warnings) - - -set_testing_mode() - - -def reset_display_options(): - """ - Reset the display options for printing and representing objects. - """ - - pd.reset_option('^display.', silent=True) - - -def round_trip_pickle(obj, path=None): - """ - Pickle an object and then read it again. - - Parameters - ---------- - obj : pandas object - The object to pickle and then re-read. - path : str, default None - The path where the pickled object is written and then read. - - Returns - ------- - round_trip_pickled_object : pandas object - The original object that was pickled and then re-read. - """ - - if path is None: - path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10))) - with ensure_clean(path) as path: - pd.to_pickle(obj, path) - return pd.read_pickle(path) - - -def round_trip_pathlib(writer, reader, path=None): - """ - Write an object to file specified by a pathlib.Path and read it back - - Parameters - ---------- - writer : callable bound to pandas object - IO writing function (e.g. DataFrame.to_csv ) - reader : callable - IO reading function (e.g. pd.read_csv ) - path : str, default None - The path where the object is written and then read. 
- - Returns - ------- - round_trip_object : pandas object - The original object that was serialized and then re-read. - """ - - import pytest - Path = pytest.importorskip('pathlib').Path - if path is None: - path = '___pathlib___' - with ensure_clean(path) as path: - writer(Path(path)) - obj = reader(Path(path)) - return obj - -def round_trip_localpath(writer, reader, path=None): +def _check_isinstance(left, right, cls): """ - Write an object to file specified by a py.path LocalPath and read it back + Helper method for our assert_* methods that ensures that + the two objects being compared have the right type before + proceeding with the comparison. Parameters ---------- - writer : callable bound to pandas object - IO writing function (e.g. DataFrame.to_csv ) - reader : callable - IO reading function (e.g. pd.read_csv ) - path : str, default None - The path where the object is written and then read. - - Returns - ------- - round_trip_object : pandas object - The original object that was serialized and then re-read. - """ - import pytest - LocalPath = pytest.importorskip('py.path').local - if path is None: - path = '___localpath___' - with ensure_clean(path) as path: - writer(LocalPath(path)) - obj = reader(LocalPath(path)) - return obj - + left : The first object being compared. + right : The second object being compared. + cls : The class type to check against. -@contextmanager -def decompress_file(path, compression): + Raises + ------ + AssertionError : Either `left` or `right` is not an instance of `cls`. 
""" - Open a compressed file and return a file object - Parameters - ---------- - path : str - The path where the file is read from + err_msg = "{name} Expected type {exp_type}, found {act_type} instead" + cls_name = cls.__name__ - compression : {'gzip', 'bz2', 'zip', 'xz', None} - Name of the decompression to use + if not isinstance(left, cls): + raise AssertionError(err_msg.format(name=cls_name, exp_type=cls, + act_type=type(left))) + if not isinstance(right, cls): + raise AssertionError(err_msg.format(name=cls_name, exp_type=cls, + act_type=type(right))) - Returns - ------- - f : file object - """ - if compression is None: - f = open(path, 'rb') - elif compression == 'gzip': - import gzip - f = gzip.open(path, 'rb') - elif compression == 'bz2': - import bz2 - f = bz2.BZ2File(path, 'rb') - elif compression == 'xz': - lzma = compat.import_lzma() - f = lzma.LZMAFile(path, 'rb') - elif compression == 'zip': - import zipfile - zip_file = zipfile.ZipFile(path) - zip_names = zip_file.namelist() - if len(zip_names) == 1: - f = zip_file.open(zip_names.pop()) - else: - raise ValueError('ZIP file {} error. Only one file per ZIP.' - .format(path)) - else: - msg = 'Unrecognized compression type: {}'.format(compression) - raise ValueError(msg) +def raise_assert_detail(obj, message, left, right, diff=None): + __tracebackhide__ = True - try: - yield f - finally: - f.close() - if compression == "zip": - zip_file.close() + if isinstance(left, np.ndarray): + left = pprint_thing(left) + elif is_categorical_dtype(left): + left = repr(left) + if PY2 and isinstance(left, string_types): + # left needs to be printable in native text type in python2 + left = left.encode('utf-8') -def write_to_compressed(compression, path, data, dest="test"): - """ - Write data to a compressed file. 
+ if isinstance(right, np.ndarray): + right = pprint_thing(right) + elif is_categorical_dtype(right): + right = repr(right) - Parameters - ---------- - compression : {'gzip', 'bz2', 'zip', 'xz'} - The compression type to use. - path : str - The file path to write the data. - data : str - The data to write. - dest : str, default "test" - The destination file (for ZIP only) + if PY2 and isinstance(right, string_types): + # right needs to be printable in native text type in python2 + right = right.encode('utf-8') - Raises - ------ - ValueError : An invalid compression value was passed in. - """ + msg = """{obj} are different - if compression == "zip": - import zipfile - compress_method = zipfile.ZipFile - elif compression == "gzip": - import gzip - compress_method = gzip.GzipFile - elif compression == "bz2": - import bz2 - compress_method = bz2.BZ2File - elif compression == "xz": - lzma = compat.import_lzma() - compress_method = lzma.LZMAFile - else: - msg = "Unrecognized compression type: {}".format(compression) - raise ValueError(msg) +{message} +[left]: {left} +[right]: {right}""".format(obj=obj, message=message, left=left, right=right) - if compression == "zip": - mode = "w" - args = (dest, data) - method = "writestr" - else: - mode = "wb" - args = (data,) - method = "write" + if diff is not None: + msg += "\n[diff]: {diff}".format(diff=diff) - with compress_method(path, mode=mode) as f: - getattr(f, method)(*args) + raise AssertionError(msg) def assert_almost_equal(left, right, check_dtype="equiv", @@ -328,441 +158,11 @@ def assert_almost_equal(left, right, check_dtype="equiv", **kwargs) -def _check_isinstance(left, right, cls): - """ - Helper method for our assert_* methods that ensures that - the two objects being compared have the right type before - proceeding with the comparison. - - Parameters - ---------- - left : The first object being compared. - right : The second object being compared. - cls : The class type to check against. 
- - Raises - ------ - AssertionError : Either `left` or `right` is not an instance of `cls`. - """ - - err_msg = "{name} Expected type {exp_type}, found {act_type} instead" - cls_name = cls.__name__ - - if not isinstance(left, cls): - raise AssertionError(err_msg.format(name=cls_name, exp_type=cls, - act_type=type(left))) - if not isinstance(right, cls): - raise AssertionError(err_msg.format(name=cls_name, exp_type=cls, - act_type=type(right))) - - def assert_dict_equal(left, right, compare_keys=True): - _check_isinstance(left, right, dict) return _testing.assert_dict_equal(left, right, compare_keys=compare_keys) -def randbool(size=(), p=0.5): - return rand(*size) <= p - - -RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), - dtype=(np.str_, 1)) -RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) + - string.digits), dtype=(np.unicode_, 1)) - - -def rands_array(nchars, size, dtype='O'): - """Generate an array of byte strings.""" - retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size)) - .view((np.str_, nchars)).reshape(size)) - if dtype is None: - return retval - else: - return retval.astype(dtype) - - -def randu_array(nchars, size, dtype='O'): - """Generate an array of unicode strings.""" - retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size)) - .view((np.unicode_, nchars)).reshape(size)) - if dtype is None: - return retval - else: - return retval.astype(dtype) - - -def rands(nchars): - """ - Generate one random byte string. - - See `rands_array` if you want to create an array of random strings. - - """ - return ''.join(np.random.choice(RANDS_CHARS, nchars)) - - -def randu(nchars): - """ - Generate one random unicode string. - - See `randu_array` if you want to create an array of random unicode strings. 
- - """ - return ''.join(np.random.choice(RANDU_CHARS, nchars)) - - -def close(fignum=None): - from matplotlib.pyplot import get_fignums, close as _close - - if fignum is None: - for fignum in get_fignums(): - _close(fignum) - else: - _close(fignum) - - -# ----------------------------------------------------------------------------- -# locale utilities - - -def check_output(*popenargs, **kwargs): - # shamelessly taken from Python 2.7 source - r"""Run command with arguments and return its output as a byte string. - - If the exit code was non-zero it raises a CalledProcessError. The - CalledProcessError object will have the return code in the returncode - attribute and output in the output attribute. - - The arguments are the same as for the Popen constructor. Example: - - >>> check_output(["ls", "-l", "/dev/null"]) - 'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n' - - The stdout argument is not allowed as it is used internally. - To capture standard error in the result, use stderr=STDOUT. - - >>> check_output(["/bin/sh", "-c", - ... "ls -l non_existent_file ; exit 0"], - ... 
stderr=STDOUT) - 'ls: non_existent_file: No such file or directory\n' - """ - if 'stdout' in kwargs: - raise ValueError('stdout argument not allowed, it will be overridden.') - process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE, - *popenargs, **kwargs) - output, unused_err = process.communicate() - retcode = process.poll() - if retcode: - cmd = kwargs.get("args") - if cmd is None: - cmd = popenargs[0] - raise subprocess.CalledProcessError(retcode, cmd, output=output) - return output - - -def _default_locale_getter(): - try: - raw_locales = check_output(['locale -a'], shell=True) - except subprocess.CalledProcessError as e: - raise type(e)("{exception}, the 'locale -a' command cannot be found " - "on your system".format(exception=e)) - return raw_locales - - -def get_locales(prefix=None, normalize=True, - locale_getter=_default_locale_getter): - """Get all the locales that are available on the system. - - Parameters - ---------- - prefix : str - If not ``None`` then return only those locales with the prefix - provided. For example to get all English language locales (those that - start with ``"en"``), pass ``prefix="en"``. - normalize : bool - Call ``locale.normalize`` on the resulting list of available locales. - If ``True``, only locales that can be set without throwing an - ``Exception`` are returned. - locale_getter : callable - The function to use to retrieve the current locales. This should return - a string with each locale separated by a newline character. - - Returns - ------- - locales : list of strings - A list of locale strings that can be set with ``locale.setlocale()``. - For example:: - - locale.setlocale(locale.LC_ALL, locale_string) - - On error will return None (no locale available, e.g. Windows) - - """ - try: - raw_locales = locale_getter() - except Exception: - return None - - try: - # raw_locales is "\n" separated list of locales - # it may contain non-decodable parts, so split - # extract what we can and then rejoin. 
- raw_locales = raw_locales.split(b'\n') - out_locales = [] - for x in raw_locales: - if PY3: - out_locales.append(str( - x, encoding=pd.options.display.encoding)) - else: - out_locales.append(str(x)) - - except TypeError: - pass - - if prefix is None: - return _valid_locales(out_locales, normalize) - - pattern = re.compile('{prefix}.*'.format(prefix=prefix)) - found = pattern.findall('\n'.join(out_locales)) - return _valid_locales(found, normalize) - - -@contextmanager -def set_locale(new_locale, lc_var=locale.LC_ALL): - """Context manager for temporarily setting a locale. - - Parameters - ---------- - new_locale : str or tuple - A string of the form <language_country>.<encoding>. For example to set - the current locale to US English with a UTF8 encoding, you would pass - "en_US.UTF-8". - lc_var : int, default `locale.LC_ALL` - The category of the locale being set. - - Notes - ----- - This is useful when you want to run a particular block of code under a - particular locale, without globally setting the locale. This probably isn't - thread-safe. - """ - current_locale = locale.getlocale() - - try: - locale.setlocale(lc_var, new_locale) - normalized_locale = locale.getlocale() - if com._all_not_none(*normalized_locale): - yield '.'.join(normalized_locale) - else: - yield new_locale - finally: - locale.setlocale(lc_var, current_locale) - - -def can_set_locale(lc, lc_var=locale.LC_ALL): - """ - Check to see if we can set a locale, and subsequently get the locale, - without raising an Exception. - - Parameters - ---------- - lc : str - The locale to attempt to set. - lc_var : int, default `locale.LC_ALL` - The category of the locale being set. 
- - Returns - ------- - is_valid : bool - Whether the passed locale can be set - """ - - try: - with set_locale(lc, lc_var=lc_var): - pass - except (ValueError, - locale.Error): # horrible name for a Exception subclass - return False - else: - return True - - -def _valid_locales(locales, normalize): - """Return a list of normalized locales that do not throw an ``Exception`` - when set. - - Parameters - ---------- - locales : str - A string where each locale is separated by a newline. - normalize : bool - Whether to call ``locale.normalize`` on each locale. - - Returns - ------- - valid_locales : list - A list of valid locales. - """ - if normalize: - normalizer = lambda x: locale.normalize(x.strip()) - else: - normalizer = lambda x: x.strip() - - return list(filter(can_set_locale, map(normalizer, locales))) - -# ----------------------------------------------------------------------------- -# Stdout / stderr decorators - - -@contextmanager -def set_defaultencoding(encoding): - """ - Set default encoding (as given by sys.getdefaultencoding()) to the given - encoding; restore on exit. 
- - Parameters - ---------- - encoding : str - """ - if not PY2: - raise ValueError("set_defaultencoding context is only available " - "in Python 2.") - orig = sys.getdefaultencoding() - reload(sys) # noqa:F821 - sys.setdefaultencoding(encoding) - try: - yield - finally: - sys.setdefaultencoding(orig) - - -# ----------------------------------------------------------------------------- -# Console debugging tools - - -def debug(f, *args, **kwargs): - from pdb import Pdb as OldPdb - try: - from IPython.core.debugger import Pdb - kw = dict(color_scheme='Linux') - except ImportError: - Pdb = OldPdb - kw = {} - pdb = Pdb(**kw) - return pdb.runcall(f, *args, **kwargs) - - -def pudebug(f, *args, **kwargs): - import pudb - return pudb.runcall(f, *args, **kwargs) - - -def set_trace(): - from IPython.core.debugger import Pdb - try: - Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back) - except Exception: - from pdb import Pdb as OldPdb - OldPdb().set_trace(sys._getframe().f_back) - -# ----------------------------------------------------------------------------- -# contextmanager to ensure the file cleanup - - -@contextmanager -def ensure_clean(filename=None, return_filelike=False): - """Gets a temporary path and agrees to remove on close. - - Parameters - ---------- - filename : str (optional) - if None, creates a temporary file which is then removed when out of - scope. if passed, creates temporary file with filename as ending. - return_filelike : bool (default False) - if True, returns a file-like which is *always* cleaned. Necessary for - savefig and other functions which want to append extensions. 
- """ - filename = filename or '' - fd = None - - if return_filelike: - f = tempfile.TemporaryFile(suffix=filename) - try: - yield f - finally: - f.close() - else: - # don't generate tempfile if using a path with directory specified - if len(os.path.dirname(filename)): - raise ValueError("Can't pass a qualified name to ensure_clean()") - - try: - fd, filename = tempfile.mkstemp(suffix=filename) - except UnicodeEncodeError: - import pytest - pytest.skip('no unicode file names on this system') - - try: - yield filename - finally: - try: - os.close(fd) - except Exception: - print("Couldn't close file descriptor: {fdesc} (file: {fname})" - .format(fdesc=fd, fname=filename)) - try: - if os.path.exists(filename): - os.remove(filename) - except Exception as e: - print("Exception on removing file: {error}".format(error=e)) - - -@contextmanager -def ensure_clean_dir(): - """ - Get a temporary directory path and agrees to remove on close. - - Yields - ------ - Temporary directory path - """ - directory_name = tempfile.mkdtemp(suffix='') - try: - yield directory_name - finally: - try: - rmtree(directory_name) - except Exception: - pass - - -@contextmanager -def ensure_safe_environment_variables(): - """ - Get a context manager to safely set environment variables - - All changes will be undone on close, hence environment variables set - within this contextmanager will neither persist nor change global state. - """ - saved_environ = dict(os.environ) - try: - yield - finally: - os.environ.clear() - os.environ.update(saved_environ) - - -# ----------------------------------------------------------------------------- -# Comparators - - -def equalContents(arr1, arr2): - """Checks if the set of unique elements of arr1 and arr2 are equivalent. 
- """ - return frozenset(arr1) == frozenset(arr2) - - def assert_index_equal(left, right, exact='equiv', check_names=True, check_less_precise=False, check_exact=True, check_categorical=True, obj='Index'): @@ -961,17 +361,6 @@ def assert_is_valid_plot_return_object(objs): .format(name=objs.__class__.__name__)) -def isiterable(obj): - return hasattr(obj, '__iter__') - - -def is_sorted(seq): - if isinstance(seq, (Index, Series)): - seq = seq.values - # sorting does not change precisions - return assert_numpy_array_equal(seq, np.sort(np.array(seq))) - - def assert_categorical_equal(left, right, check_dtype=True, check_category_order=True, obj='Categorical'): """Test that Categoricals are equivalent. @@ -1061,39 +450,6 @@ def assert_timedelta_array_equal(left, right, obj='TimedeltaArray'): assert_attr_equal('freq', left, right, obj=obj) -def raise_assert_detail(obj, message, left, right, diff=None): - __tracebackhide__ = True - - if isinstance(left, np.ndarray): - left = pprint_thing(left) - elif is_categorical_dtype(left): - left = repr(left) - - if PY2 and isinstance(left, string_types): - # left needs to be printable in native text type in python2 - left = left.encode('utf-8') - - if isinstance(right, np.ndarray): - right = pprint_thing(right) - elif is_categorical_dtype(right): - right = repr(right) - - if PY2 and isinstance(right, string_types): - # right needs to be printable in native text type in python2 - right = right.encode('utf-8') - - msg = """{obj} are different - -{message} -[left]: {left} -[right]: {right}""".format(obj=obj, message=message, left=left, right=right) - - if diff is not None: - msg += "\n[diff]: {diff}".format(diff=diff) - - raise AssertionError(msg) - - def assert_numpy_array_equal(left, right, strict_nan=False, check_dtype=True, err_msg=None, check_same=None, obj='numpy array'): @@ -1587,63 +943,11 @@ def assert_equal(left, right, **kwargs): elif isinstance(left, TimedeltaArray): assert_timedelta_array_equal(left, right, **kwargs) elif 
isinstance(left, ExtensionArray): - assert_extension_array_equal(left, right, **kwargs) - elif isinstance(left, np.ndarray): - assert_numpy_array_equal(left, right, **kwargs) - else: - raise NotImplementedError(type(left)) - - -def box_expected(expected, box_cls, transpose=True): - """ - Helper function to wrap the expected output of a test in a given box_class. - - Parameters - ---------- - expected : np.ndarray, Index, Series - box_cls : {Index, Series, DataFrame} - - Returns - ------- - subclass of box_cls - """ - if box_cls is pd.Index: - expected = pd.Index(expected) - elif box_cls is pd.Series: - expected = pd.Series(expected) - elif box_cls is pd.DataFrame: - expected = pd.Series(expected).to_frame() - if transpose: - # for vector operations, we we need a DataFrame to be a single-row, - # not a single-column, in order to operate against non-DataFrame - # vectors of the same length. - expected = expected.T - elif box_cls is PeriodArray: - # the PeriodArray constructor is not as flexible as period_array - expected = period_array(expected) - elif box_cls is DatetimeArray: - expected = DatetimeArray(expected) - elif box_cls is TimedeltaArray: - expected = TimedeltaArray(expected) - elif box_cls is np.ndarray: - expected = np.array(expected) - elif box_cls is to_array: - expected = to_array(expected) - else: - raise NotImplementedError(box_cls) - return expected - - -def to_array(obj): - # temporary implementation until we get pd.array in place - if is_period_dtype(obj): - return period_array(obj) - elif is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj): - return DatetimeArray._from_sequence(obj) - elif is_timedelta64_dtype(obj): - return TimedeltaArray._from_sequence(obj) + assert_extension_array_equal(left, right, **kwargs) + elif isinstance(left, np.ndarray): + assert_numpy_array_equal(left, right, **kwargs) else: - return np.array(obj) + raise NotImplementedError(type(left)) # ----------------------------------------------------------------------------- 
@@ -1829,735 +1133,7 @@ def assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True, # assert(left.default_kind == right.default_kind) for col in right: - assert (col in left) - -# ----------------------------------------------------------------------------- -# Others - - -def assert_contains_all(iterable, dic): - for k in iterable: - assert k in dic, "Did not contain item: '{key!r}'".format(key=k) - - -def assert_copy(iter1, iter2, **eql_kwargs): - """ - iter1, iter2: iterables that produce elements - comparable with assert_almost_equal - - Checks that the elements are equal, but not - the same object. (Does not check that items - in sequences are also not the same object) - """ - for elem1, elem2 in zip(iter1, iter2): - assert_almost_equal(elem1, elem2, **eql_kwargs) - msg = ("Expected object {obj1!r} and object {obj2!r} to be " - "different objects, but they were the same object." - ).format(obj1=type(elem1), obj2=type(elem2)) - assert elem1 is not elem2, msg - - -def getCols(k): - return string.ascii_uppercase[:k] - - -def getArangeMat(): - return np.arange(N * K).reshape((N, K)) - - -# make index -def makeStringIndex(k=10, name=None): - return Index(rands_array(nchars=10, size=k), name=name) - - -def makeUnicodeIndex(k=10, name=None): - return Index(randu_array(nchars=10, size=k), name=name) - - -def makeCategoricalIndex(k=10, n=3, name=None, **kwargs): - """ make a length k index or n categories """ - x = rands_array(nchars=4, size=n) - return CategoricalIndex(np.random.choice(x, k), name=name, **kwargs) - - -def makeIntervalIndex(k=10, name=None, **kwargs): - """ make a length k IntervalIndex """ - x = np.linspace(0, 100, num=(k + 1)) - return IntervalIndex.from_breaks(x, name=name, **kwargs) - - -def makeBoolIndex(k=10, name=None): - if k == 1: - return Index([True], name=name) - elif k == 2: - return Index([False, True], name=name) - return Index([False, True] + [False] * (k - 2), name=name) - - -def makeIntIndex(k=10, name=None): - return 
Index(lrange(k), name=name) - - -def makeUIntIndex(k=10, name=None): - return Index([2**63 + i for i in lrange(k)], name=name) - - -def makeRangeIndex(k=10, name=None, **kwargs): - return RangeIndex(0, k, 1, name=name, **kwargs) - - -def makeFloatIndex(k=10, name=None): - values = sorted(np.random.random_sample(k)) - np.random.random_sample(1) - return Index(values * (10 ** np.random.randint(0, 9)), name=name) - - -def makeDateIndex(k=10, freq='B', name=None, **kwargs): - dt = datetime(2000, 1, 1) - dr = bdate_range(dt, periods=k, freq=freq, name=name) - return DatetimeIndex(dr, name=name, **kwargs) - - -def makeTimedeltaIndex(k=10, freq='D', name=None, **kwargs): - return pd.timedelta_range(start='1 day', periods=k, freq=freq, - name=name, **kwargs) - - -def makePeriodIndex(k=10, name=None, **kwargs): - dt = datetime(2000, 1, 1) - dr = pd.period_range(start=dt, periods=k, freq='B', name=name, **kwargs) - return dr - - -def makeMultiIndex(k=10, names=None, **kwargs): - return MultiIndex.from_product( - (('foo', 'bar'), (1, 2)), names=names, **kwargs) - - -def all_index_generator(k=10): - """Generator which can be iterated over to get instances of all the various - index classes. - - Parameters - ---------- - k: length of each of the index instances - """ - all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex, - makeUnicodeIndex, makeDateIndex, makePeriodIndex, - makeTimedeltaIndex, makeBoolIndex, makeRangeIndex, - makeIntervalIndex, - makeCategoricalIndex] - for make_index_func in all_make_index_funcs: - yield make_index_func(k=k) - - -def index_subclass_makers_generator(): - make_index_funcs = [ - makeDateIndex, makePeriodIndex, - makeTimedeltaIndex, makeRangeIndex, - makeIntervalIndex, makeCategoricalIndex, - makeMultiIndex - ] - for make_index_func in make_index_funcs: - yield make_index_func - - -def all_timeseries_index_generator(k=10): - """Generator which can be iterated over to get instances of all the classes - which represent time-seires. 
- - Parameters - ---------- - k: length of each of the index instances - """ - make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex] - for make_index_func in make_index_funcs: - yield make_index_func(k=k) - - -# make series -def makeFloatSeries(name=None): - index = makeStringIndex(N) - return Series(randn(N), index=index, name=name) - - -def makeStringSeries(name=None): - index = makeStringIndex(N) - return Series(randn(N), index=index, name=name) - - -def makeObjectSeries(name=None): - dateIndex = makeDateIndex(N) - dateIndex = Index(dateIndex, dtype=object) - index = makeStringIndex(N) - return Series(dateIndex, index=index, name=name) - - -def getSeriesData(): - index = makeStringIndex(N) - return {c: Series(randn(N), index=index) for c in getCols(K)} - - -def makeTimeSeries(nper=None, freq='B', name=None): - if nper is None: - nper = N - return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name) - - -def makePeriodSeries(nper=None, name=None): - if nper is None: - nper = N - return Series(randn(nper), index=makePeriodIndex(nper), name=name) - - -def getTimeSeriesData(nper=None, freq='B'): - return {c: makeTimeSeries(nper, freq) for c in getCols(K)} - - -def getPeriodData(nper=None): - return {c: makePeriodSeries(nper) for c in getCols(K)} - - -# make frame -def makeTimeDataFrame(nper=None, freq='B'): - data = getTimeSeriesData(nper, freq) - return DataFrame(data) - - -def makeDataFrame(): - data = getSeriesData() - return DataFrame(data) - - -def getMixedTypeDict(): - index = Index(['a', 'b', 'c', 'd', 'e']) - - data = { - 'A': [0., 1., 2., 3., 4.], - 'B': [0., 1., 0., 1., 0.], - 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'], - 'D': bdate_range('1/1/2009', periods=5) - } - - return index, data - - -def makeMixedDataFrame(): - return DataFrame(getMixedTypeDict()[1]) - - -def makePeriodFrame(nper=None): - data = getPeriodData(nper) - return DataFrame(data) - - -def makePanel(nper=None): - with 
warnings.catch_warnings(record=True): - warnings.filterwarnings("ignore", "\\nPanel", FutureWarning) - cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]] - data = {c: makeTimeDataFrame(nper) for c in cols} - return Panel.fromDict(data) - - -def makePeriodPanel(nper=None): - with warnings.catch_warnings(record=True): - warnings.filterwarnings("ignore", "\\nPanel", FutureWarning) - cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]] - data = {c: makePeriodFrame(nper) for c in cols} - return Panel.fromDict(data) - - -def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None, - idx_type=None): - """Create an index/multindex with given dimensions, levels, names, etc' - - nentries - number of entries in index - nlevels - number of levels (> 1 produces multindex) - prefix - a string prefix for labels - names - (Optional), bool or list of strings. if True will use default - names, if false will use no names, if a list is given, the name of - each level in the index will be taken from the list. - ndupe_l - (Optional), list of ints, the number of rows for which the - label will repeated at the corresponding level, you can specify just - the first few, the rest will use the default ndupe_l of 1. - len(ndupe_l) <= nlevels. - idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td". - If idx_type is not None, `idx_nlevels` must be 1. - "i"/"f" creates an integer/float index, - "s"/"u" creates a string/unicode index - "dt" create a datetime index. - "td" create a datetime index. - - if unspecified, string labels will be generated. 
- """ - - if ndupe_l is None: - ndupe_l = [1] * nlevels - assert (is_sequence(ndupe_l) and len(ndupe_l) <= nlevels) - assert (names is None or names is False or - names is True or len(names) is nlevels) - assert idx_type is None or (idx_type in ('i', 'f', 's', 'u', - 'dt', 'p', 'td') - and nlevels == 1) - - if names is True: - # build default names - names = [prefix + str(i) for i in range(nlevels)] - if names is False: - # pass None to index constructor for no name - names = None - - # make singelton case uniform - if isinstance(names, compat.string_types) and nlevels == 1: - names = [names] - - # specific 1D index type requested? - idx_func = dict(i=makeIntIndex, f=makeFloatIndex, - s=makeStringIndex, u=makeUnicodeIndex, - dt=makeDateIndex, td=makeTimedeltaIndex, - p=makePeriodIndex).get(idx_type) - if idx_func: - idx = idx_func(nentries) - # but we need to fill in the name - if names: - idx.name = names[0] - return idx - elif idx_type is not None: - raise ValueError('"{idx_type}" is not a legal value for `idx_type`, ' - 'use "i"/"f"/"s"/"u"/"dt/"p"/"td".' - .format(idx_type=idx_type)) - - if len(ndupe_l) < nlevels: - ndupe_l.extend([1] * (nlevels - len(ndupe_l))) - assert len(ndupe_l) == nlevels - - assert all(x > 0 for x in ndupe_l) - - tuples = [] - for i in range(nlevels): - def keyfunc(x): - import re - numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_") - return lmap(int, numeric_tuple) - - # build a list of lists to create the index from - div_factor = nentries // ndupe_l[i] + 1 - cnt = Counter() - for j in range(div_factor): - label = '{prefix}_l{i}_g{j}'.format(prefix=prefix, i=i, j=j) - cnt[label] = ndupe_l[i] - # cute Counter trick - result = list(sorted(cnt.elements(), key=keyfunc))[:nentries] - tuples.append(result) - - tuples = lzip(*tuples) - - # convert tuples to index - if nentries == 1: - # we have a single level of tuples, i.e. 
a regular Index - index = Index(tuples[0], name=names[0]) - elif nlevels == 1: - name = None if names is None else names[0] - index = Index((x[0] for x in tuples), name=name) - else: - index = MultiIndex.from_tuples(tuples, names=names) - return index - - -def makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True, - c_idx_nlevels=1, r_idx_nlevels=1, data_gen_f=None, - c_ndupe_l=None, r_ndupe_l=None, dtype=None, - c_idx_type=None, r_idx_type=None): - """ - nrows, ncols - number of data rows/cols - c_idx_names, idx_names - False/True/list of strings, yields No names , - default names or uses the provided names for the levels of the - corresponding index. You can provide a single string when - c_idx_nlevels ==1. - c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex - r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex - data_gen_f - a function f(row,col) which return the data value - at that position, the default generator used yields values of the form - "RxCy" based on position. - c_ndupe_l, r_ndupe_l - list of integers, determines the number - of duplicates for each label at a given level of the corresponding - index. The default `None` value produces a multiplicity of 1 across - all levels, i.e. a unique index. Will accept a partial list of length - N < idx_nlevels, for just the first N levels. If ndupe doesn't divide - nrows/ncol, the last label might have lower multiplicity. - dtype - passed to the DataFrame constructor as is, in case you wish to - have more control in conjuncion with a custom `data_gen_f` - r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td". - If idx_type is not None, `idx_nlevels` must be 1. - "i"/"f" creates an integer/float index, - "s"/"u" creates a string/unicode index - "dt" create a datetime index. - "td" create a timedelta index. - - if unspecified, string labels will be generated. 
- - Examples: - - # 5 row, 3 columns, default names on both, single index on both axis - >> makeCustomDataframe(5,3) - - # make the data a random int between 1 and 100 - >> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100)) - - # 2-level multiindex on rows with each label duplicated - # twice on first level, default names on both axis, single - # index on both axis - >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2]) - - # DatetimeIndex on row, index with unicode labels on columns - # no names on either axis - >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False, - r_idx_type="dt",c_idx_type="u") - - # 4-level multindex on rows with names provided, 2-level multindex - # on columns with default labels and default names. - >> a=makeCustomDataframe(5,3,r_idx_nlevels=4, - r_idx_names=["FEE","FI","FO","FAM"], - c_idx_nlevels=2) - - >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4) - """ - - assert c_idx_nlevels > 0 - assert r_idx_nlevels > 0 - assert r_idx_type is None or (r_idx_type in ('i', 'f', 's', - 'u', 'dt', 'p', 'td') - and r_idx_nlevels == 1) - assert c_idx_type is None or (c_idx_type in ('i', 'f', 's', - 'u', 'dt', 'p', 'td') - and c_idx_nlevels == 1) - - columns = makeCustomIndex(ncols, nlevels=c_idx_nlevels, prefix='C', - names=c_idx_names, ndupe_l=c_ndupe_l, - idx_type=c_idx_type) - index = makeCustomIndex(nrows, nlevels=r_idx_nlevels, prefix='R', - names=r_idx_names, ndupe_l=r_ndupe_l, - idx_type=r_idx_type) - - # by default, generate data based on location - if data_gen_f is None: - data_gen_f = lambda r, c: "R{rows}C{cols}".format(rows=r, cols=c) - - data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)] - - return DataFrame(data, index, columns, dtype=dtype) - - -def _create_missing_idx(nrows, ncols, density, random_state=None): - if random_state is None: - random_state = np.random - else: - random_state = np.random.RandomState(random_state) - - # below is cribbed from scipy.sparse - size = int(np.round((1 - density) * 
nrows * ncols)) - # generate a few more to ensure unique values - min_rows = 5 - fac = 1.02 - extra_size = min(size + min_rows, fac * size) - - def _gen_unique_rand(rng, _extra_size): - ind = rng.rand(int(_extra_size)) - return np.unique(np.floor(ind * nrows * ncols))[:size] - - ind = _gen_unique_rand(random_state, extra_size) - while ind.size < size: - extra_size *= 1.05 - ind = _gen_unique_rand(random_state, extra_size) - - j = np.floor(ind * 1. / nrows).astype(int) - i = (ind - j * nrows).astype(int) - return i.tolist(), j.tolist() - - -def makeMissingCustomDataframe(nrows, ncols, density=.9, random_state=None, - c_idx_names=True, r_idx_names=True, - c_idx_nlevels=1, r_idx_nlevels=1, - data_gen_f=None, - c_ndupe_l=None, r_ndupe_l=None, dtype=None, - c_idx_type=None, r_idx_type=None): - """ - Parameters - ---------- - Density : float, optional - Float in (0, 1) that gives the percentage of non-missing numbers in - the DataFrame. - random_state : {np.random.RandomState, int}, optional - Random number generator or random seed. - - See makeCustomDataframe for descriptions of the rest of the parameters. 
- """ - df = makeCustomDataframe(nrows, ncols, c_idx_names=c_idx_names, - r_idx_names=r_idx_names, - c_idx_nlevels=c_idx_nlevels, - r_idx_nlevels=r_idx_nlevels, - data_gen_f=data_gen_f, - c_ndupe_l=c_ndupe_l, r_ndupe_l=r_ndupe_l, - dtype=dtype, c_idx_type=c_idx_type, - r_idx_type=r_idx_type) - - i, j = _create_missing_idx(nrows, ncols, density, random_state) - df.values[i, j] = np.nan - return df - - -def makeMissingDataframe(density=.9, random_state=None): - df = makeDataFrame() - i, j = _create_missing_idx(*df.shape, density=density, - random_state=random_state) - df.values[i, j] = np.nan - return df - - -def add_nans(panel): - I, J, N = panel.shape - for i, item in enumerate(panel.items): - dm = panel[item] - for j, col in enumerate(dm.columns): - dm[col][:i + j] = np.NaN - return panel - - -def add_nans_panel4d(panel4d): - for l, label in enumerate(panel4d.labels): - panel = panel4d[label] - add_nans(panel) - return panel4d - - -class TestSubDict(dict): - - def __init__(self, *args, **kwargs): - dict.__init__(self, *args, **kwargs) - - -def optional_args(decorator): - """allows a decorator to take optional positional and keyword arguments. - Assumes that taking a single, callable, positional argument means that - it is decorating a function, i.e. 
something like this:: - - @my_decorator - def function(): pass - - Calls decorator with decorator(f, *args, **kwargs)""" - - @wraps(decorator) - def wrapper(*args, **kwargs): - def dec(f): - return decorator(f, *args, **kwargs) - - is_decorating = not kwargs and len(args) == 1 and callable(args[0]) - if is_decorating: - f = args[0] - args = [] - return dec(f) - else: - return dec - - return wrapper - - -# skip tests on exceptions with this message -_network_error_messages = ( - # 'urlopen error timed out', - # 'timeout: timed out', - # 'socket.timeout: timed out', - 'timed out', - 'Server Hangup', - 'HTTP Error 503: Service Unavailable', - '502: Proxy Error', - 'HTTP Error 502: internal error', - 'HTTP Error 502', - 'HTTP Error 503', - 'HTTP Error 403', - 'HTTP Error 400', - 'Temporary failure in name resolution', - 'Name or service not known', - 'Connection refused', - 'certificate verify', -) - -# or this e.errno/e.reason.errno -_network_errno_vals = ( - 101, # Network is unreachable - 111, # Connection refused - 110, # Connection timed out - 104, # Connection reset Error - 54, # Connection reset by peer - 60, # urllib.error.URLError: [Errno 60] Connection timed out -) - -# Both of the above shouldn't mask real issues such as 404's -# or refused connections (changed DNS). -# But some tests (test_data yahoo) contact incredibly flakey -# servers. - -# and conditionally raise on these exception types -_network_error_classes = (IOError, httplib.HTTPException) - -if PY3: - _network_error_classes += (TimeoutError,) # noqa - - -def can_connect(url, error_classes=_network_error_classes): - """Try to connect to the given url. 
True if succeeds, False if IOError - raised - - Parameters - ---------- - url : basestring - The URL to try to connect to - - Returns - ------- - connectable : bool - Return True if no IOError (unable to connect) or URLError (bad url) was - raised - """ - try: - with urlopen(url): - pass - except error_classes: - return False - else: - return True - - -@optional_args -def network(t, url="http://www.google.com", - raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT, - check_before_test=False, - error_classes=_network_error_classes, - skip_errnos=_network_errno_vals, - _skip_on_messages=_network_error_messages, - ): - """ - Label a test as requiring network connection and, if an error is - encountered, only raise if it does not find a network connection. - - In comparison to ``network``, this assumes an added contract to your test: - you must assert that, under normal conditions, your test will ONLY fail if - it does not have network connectivity. - - You can call this in 3 ways: as a standard decorator, with keyword - arguments, or with a positional argument that is the url to check. - - Parameters - ---------- - t : callable - The test requiring network connectivity. - url : path - The url to test via ``pandas.io.common.urlopen`` to check - for connectivity. Defaults to 'http://www.google.com'. - raise_on_error : bool - If True, never catches errors. - check_before_test : bool - If True, checks connectivity before running the test case. - error_classes : tuple or Exception - error classes to ignore. If not in ``error_classes``, raises the error. - defaults to IOError. Be careful about changing the error classes here. - skip_errnos : iterable of int - Any exception that has .errno or .reason.erno set to one - of these values will be skipped with an appropriate - message. - _skip_on_messages: iterable of string - any exception e for which one of the strings is - a substring of str(e) will be skipped with an appropriate - message. 
Intended to suppress errors where an errno isn't available. - - Notes - ----- - * ``raise_on_error`` supercedes ``check_before_test`` - - Returns - ------- - t : callable - The decorated test ``t``, with checks for connectivity errors. - - Example - ------- - - Tests decorated with @network will fail if it's possible to make a network - connection to another URL (defaults to google.com):: - - >>> from pandas.util.testing import network - >>> from pandas.io.common import urlopen - >>> @network - ... def test_network(): - ... with urlopen("rabbit://bonanza.com"): - ... pass - Traceback - ... - URLError: <urlopen error unknown url type: rabit> - - You can specify alternative URLs:: - - >>> @network("http://www.yahoo.com") - ... def test_something_with_yahoo(): - ... raise IOError("Failure Message") - >>> test_something_with_yahoo() - Traceback (most recent call last): - ... - IOError: Failure Message - - If you set check_before_test, it will check the url first and not run the - test on failure:: - - >>> @network("failing://url.blaher", check_before_test=True) - ... def test_something(): - ... print("I ran!") - ... raise ValueError("Failure") - >>> test_something() - Traceback (most recent call last): - ... - - Errors not related to networking will always be raised. 
- """ - from pytest import skip - t.network = True - - @compat.wraps(t) - def wrapper(*args, **kwargs): - if check_before_test and not raise_on_error: - if not can_connect(url, error_classes): - skip() - try: - return t(*args, **kwargs) - except Exception as e: - errno = getattr(e, 'errno', None) - if not errno and hasattr(errno, "reason"): - errno = getattr(e.reason, 'errno', None) - - if errno in skip_errnos: - skip("Skipping test due to known errno" - " and error {error}".format(error=e)) - - try: - e_str = traceback.format_exc(e) - except Exception: - e_str = str(e) - - if any(m.lower() in e_str.lower() for m in _skip_on_messages): - skip("Skipping test because exception " - "message is known and error {error}".format(error=e)) - - if not isinstance(e, error_classes): - raise - - if raise_on_error or can_connect(url, error_classes): - raise - else: - skip("Skipping test due to lack of connectivity" - " and error {error}".format(error=e)) - - return wrapper - - -with_connectivity_check = network + assert col in left def assert_raises_regex(_exception, _regexp, _callable=None, @@ -2800,274 +1376,3 @@ class for all warnings. To check that no warning is returned, assert saw_warning, msg assert not extra_warnings, ("Caused unexpected warning(s): {extra!r}." ).format(extra=extra_warnings) - - -class RNGContext(object): - """ - Context manager to set the numpy random number generator speed. Returns - to the original value upon exiting the context manager. 
- - Parameters - ---------- - seed : int - Seed for numpy.random.seed - - Examples - -------- - - with RNGContext(42): - np.random.randn() - """ - - def __init__(self, seed): - self.seed = seed - - def __enter__(self): - - self.start_state = np.random.get_state() - np.random.seed(self.seed) - - def __exit__(self, exc_type, exc_value, traceback): - - np.random.set_state(self.start_state) - - -@contextmanager -def with_csv_dialect(name, **kwargs): - """ - Context manager to temporarily register a CSV dialect for parsing CSV. - - Parameters - ---------- - name : str - The name of the dialect. - kwargs : mapping - The parameters for the dialect. - - Raises - ------ - ValueError : the name of the dialect conflicts with a builtin one. - - See Also - -------- - csv : Python's CSV library. - """ - import csv - _BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"} - - if name in _BUILTIN_DIALECTS: - raise ValueError("Cannot override builtin dialect.") - - csv.register_dialect(name, **kwargs) - yield - csv.unregister_dialect(name) - - -@contextmanager -def use_numexpr(use, min_elements=None): - from pandas.core.computation import expressions as expr - if min_elements is None: - min_elements = expr._MIN_ELEMENTS - - olduse = expr._USE_NUMEXPR - oldmin = expr._MIN_ELEMENTS - expr.set_use_numexpr(use) - expr._MIN_ELEMENTS = min_elements - yield - expr._MIN_ELEMENTS = oldmin - expr.set_use_numexpr(olduse) - - -def test_parallel(num_threads=2, kwargs_list=None): - """Decorator to run the same function multiple times in parallel. - - Parameters - ---------- - num_threads : int, optional - The number of times the function is run in parallel. - kwargs_list : list of dicts, optional - The list of kwargs to update original - function kwargs on different threads. - Notes - ----- - This decorator does not pass the return value of the decorated function. 
- - Original from scikit-image: - - https://github.com/scikit-image/scikit-image/pull/1519 - - """ - - assert num_threads > 0 - has_kwargs_list = kwargs_list is not None - if has_kwargs_list: - assert len(kwargs_list) == num_threads - import threading - - def wrapper(func): - @wraps(func) - def inner(*args, **kwargs): - if has_kwargs_list: - update_kwargs = lambda i: dict(kwargs, **kwargs_list[i]) - else: - update_kwargs = lambda i: kwargs - threads = [] - for i in range(num_threads): - updated_kwargs = update_kwargs(i) - thread = threading.Thread(target=func, args=args, - kwargs=updated_kwargs) - threads.append(thread) - for thread in threads: - thread.start() - for thread in threads: - thread.join() - return inner - return wrapper - - -class SubclassedSeries(Series): - _metadata = ['testattr', 'name'] - - @property - def _constructor(self): - return SubclassedSeries - - @property - def _constructor_expanddim(self): - return SubclassedDataFrame - - -class SubclassedDataFrame(DataFrame): - _metadata = ['testattr'] - - @property - def _constructor(self): - return SubclassedDataFrame - - @property - def _constructor_sliced(self): - return SubclassedSeries - - -class SubclassedSparseSeries(pd.SparseSeries): - _metadata = ['testattr'] - - @property - def _constructor(self): - return SubclassedSparseSeries - - @property - def _constructor_expanddim(self): - return SubclassedSparseDataFrame - - -class SubclassedSparseDataFrame(pd.SparseDataFrame): - _metadata = ['testattr'] - - @property - def _constructor(self): - return SubclassedSparseDataFrame - - @property - def _constructor_sliced(self): - return SubclassedSparseSeries - - -class SubclassedCategorical(Categorical): - - @property - def _constructor(self): - return SubclassedCategorical - - -@contextmanager -def set_timezone(tz): - """Context manager for temporarily setting a timezone. - - Parameters - ---------- - tz : str - A string representing a valid timezone. 
- - Examples - -------- - - >>> from datetime import datetime - >>> from dateutil.tz import tzlocal - >>> tzlocal().tzname(datetime.now()) - 'IST' - - >>> with set_timezone('US/Eastern'): - ... tzlocal().tzname(datetime.now()) - ... - 'EDT' - """ - - import os - import time - - def setTZ(tz): - if tz is None: - try: - del os.environ['TZ'] - except KeyError: - pass - else: - os.environ['TZ'] = tz - time.tzset() - - orig_tz = os.environ.get('TZ') - setTZ(tz) - try: - yield - finally: - setTZ(orig_tz) - - -def _make_skipna_wrapper(alternative, skipna_alternative=None): - """Create a function for calling on an array. - - Parameters - ---------- - alternative : function - The function to be called on the array with no NaNs. - Only used when 'skipna_alternative' is None. - skipna_alternative : function - The function to be called on the original array - - Returns - ------- - skipna_wrapper : function - """ - if skipna_alternative: - def skipna_wrapper(x): - return skipna_alternative(x.values) - else: - def skipna_wrapper(x): - nona = x.dropna() - if len(nona) == 0: - return np.nan - return alternative(nona) - - return skipna_wrapper - - -def convert_rows_list_to_csv_str(rows_list): - """ - Convert list of CSV rows to single CSV-formatted string for current OS. - - This method is used for creating expected value of to_csv() method. - - Parameters - ---------- - rows_list : list - The list of string. Each element represents the row of csv. - - Returns - ------- - expected : string - Expected output of to_csv() in current OS - """ - sep = os.linesep - expected = sep.join(rows_list) + sep - return expected diff --git a/pandas/util/testing/strategies.py b/pandas/util/testing/strategies.py new file mode 100644 index 0000000000000..93c5d4b7547f7 --- /dev/null +++ b/pandas/util/testing/strategies.py @@ -0,0 +1,459 @@ +""" +strategies for creating test data. 
+ +`strategies` name derived from `hypothesis` +""" +from datetime import datetime +import string +import warnings + +import numpy as np + +import pandas.compat as compat +from pandas.compat import lrange, lmap, Counter, lzip, u + +from pandas.core.dtypes.common import is_sequence + +import pandas as pd + + +N = 30 +K = 4 + + +RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), + dtype=(np.str_, 1)) +RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) + + string.digits), dtype=(np.unicode_, 1)) + + +def rands_array(nchars, size, dtype='O'): + """Generate an array of byte strings.""" + retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size)) + .view((np.str_, nchars)).reshape(size)) + if dtype is None: + return retval + else: + return retval.astype(dtype) + + +def randu_array(nchars, size, dtype='O'): + """Generate an array of unicode strings.""" + retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size)) + .view((np.unicode_, nchars)).reshape(size)) + if dtype is None: + return retval + else: + return retval.astype(dtype) + + +def getCols(k): + return string.ascii_uppercase[:k] + + +def makeStringIndex(k=10, name=None): + return pd.Index(rands_array(nchars=10, size=k), name=name) + + +def makeUnicodeIndex(k=10, name=None): + return pd.Index(randu_array(nchars=10, size=k), name=name) + + +def makeCategoricalIndex(k=10, n=3, name=None, **kwargs): + """ make a length k index or n categories """ + x = rands_array(nchars=4, size=n) + return pd.CategoricalIndex(np.random.choice(x, k), name=name, **kwargs) + + +def makeIntervalIndex(k=10, name=None, **kwargs): + """ make a length k IntervalIndex """ + x = np.linspace(0, 100, num=(k + 1)) + return pd.IntervalIndex.from_breaks(x, name=name, **kwargs) + + +def makeBoolIndex(k=10, name=None): + if k == 1: + return pd.Index([True], name=name) + elif k == 2: + return pd.Index([False, True], name=name) + return pd.Index([False, True] + [False] * (k - 2), name=name) + 
+ +def makeIntIndex(k=10, name=None): + return pd.Index(lrange(k), name=name) + + +def makeUIntIndex(k=10, name=None): + return pd.Index([2**63 + i for i in lrange(k)], name=name) + + +def makeRangeIndex(k=10, name=None, **kwargs): + return pd.RangeIndex(0, k, 1, name=name, **kwargs) + + +def makeFloatIndex(k=10, name=None): + values = sorted(np.random.random_sample(k)) - np.random.random_sample(1) + return pd.Index(values * (10 ** np.random.randint(0, 9)), name=name) + + +def makeDateIndex(k=10, freq='B', name=None, **kwargs): + dt = datetime(2000, 1, 1) + dr = pd.bdate_range(dt, periods=k, freq=freq, name=name) + return pd.DatetimeIndex(dr, name=name, **kwargs) + + +def makeTimedeltaIndex(k=10, freq='D', name=None, **kwargs): + return pd.timedelta_range(start='1 day', periods=k, freq=freq, + name=name, **kwargs) + + +def makePeriodIndex(k=10, name=None, **kwargs): + dt = datetime(2000, 1, 1) + dr = pd.period_range(start=dt, periods=k, freq='B', name=name, **kwargs) + return dr + + +def makeMultiIndex(k=10, names=None, **kwargs): + return pd.MultiIndex.from_product( + (('foo', 'bar'), (1, 2)), names=names, **kwargs) + + +def makeFloatSeries(name=None): + index = makeStringIndex(N) + return pd.Series(np.random.randn(N), index=index, name=name) + + +def makeStringSeries(name=None): + index = makeStringIndex(N) + return pd.Series(np.random.randn(N), index=index, name=name) + + +def makeObjectSeries(name=None): + dateIndex = makeDateIndex(N) + dateIndex = pd.Index(dateIndex, dtype=object) + index = makeStringIndex(N) + return pd.Series(dateIndex, index=index, name=name) + + +def getSeriesData(): + index = makeStringIndex(N) + return {c: pd.Series(np.random.randn(N), index=index) for c in getCols(K)} + + +def makeTimeSeries(nper=None, freq='B', name=None): + if nper is None: + nper = N + return pd.Series(np.random.randn(nper), + index=makeDateIndex(nper, freq=freq), + name=name) + + +def makePeriodSeries(nper=None, name=None): + if nper is None: + nper = N + return 
pd.Series(np.random.randn(nper), + index=makePeriodIndex(nper), + name=name) + + +def getTimeSeriesData(nper=None, freq='B'): + return {c: makeTimeSeries(nper, freq) for c in getCols(K)} + + +def getPeriodData(nper=None): + return {c: makePeriodSeries(nper) for c in getCols(K)} + + +def makeTimeDataFrame(nper=None, freq='B'): + data = getTimeSeriesData(nper, freq) + return pd.DataFrame(data) + + +def makeDataFrame(): + data = getSeriesData() + return pd.DataFrame(data) + + +def getMixedTypeDict(): + index = pd.Index(['a', 'b', 'c', 'd', 'e']) + + data = { + 'A': [0., 1., 2., 3., 4.], + 'B': [0., 1., 0., 1., 0.], + 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'], + 'D': pd.bdate_range('1/1/2009', periods=5) + } + + return index, data + + +def makeMixedDataFrame(): + return pd.DataFrame(getMixedTypeDict()[1]) + + +def makePeriodFrame(nper=None): + data = getPeriodData(nper) + return pd.DataFrame(data) + + +def makePanel(nper=None): + with warnings.catch_warnings(record=True): + warnings.filterwarnings("ignore", "\\nPanel", FutureWarning) + cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]] + data = {c: makeTimeDataFrame(nper) for c in cols} + return pd.Panel.fromDict(data) + + +def makePeriodPanel(nper=None): + with warnings.catch_warnings(record=True): + warnings.filterwarnings("ignore", "\\nPanel", FutureWarning) + cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]] + data = {c: makePeriodFrame(nper) for c in cols} + return pd.Panel.fromDict(data) + + +def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None, + idx_type=None): + """Create an index/multindex with given dimensions, levels, names, etc' + + nentries - number of entries in index + nlevels - number of levels (> 1 produces multindex) + prefix - a string prefix for labels + names - (Optional), bool or list of strings. if True will use default + names, if false will use no names, if a list is given, the name of + each level in the index will be taken from the list. 
+ ndupe_l - (Optional), list of ints, the number of rows for which the + label will repeated at the corresponding level, you can specify just + the first few, the rest will use the default ndupe_l of 1. + len(ndupe_l) <= nlevels. + idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td". + If idx_type is not None, `idx_nlevels` must be 1. + "i"/"f" creates an integer/float index, + "s"/"u" creates a string/unicode index + "dt" create a datetime index. + "td" create a datetime index. + + if unspecified, string labels will be generated. + """ + + if ndupe_l is None: + ndupe_l = [1] * nlevels + assert (is_sequence(ndupe_l) and len(ndupe_l) <= nlevels) + assert (names is None or names is False or + names is True or len(names) is nlevels) + assert idx_type is None or (idx_type in ('i', 'f', 's', 'u', + 'dt', 'p', 'td') + and nlevels == 1) + + if names is True: + # build default names + names = [prefix + str(i) for i in range(nlevels)] + if names is False: + # pass None to index constructor for no name + names = None + + # make singelton case uniform + if isinstance(names, compat.string_types) and nlevels == 1: + names = [names] + + # specific 1D index type requested? + idx_func = dict(i=makeIntIndex, f=makeFloatIndex, + s=makeStringIndex, u=makeUnicodeIndex, + dt=makeDateIndex, td=makeTimedeltaIndex, + p=makePeriodIndex).get(idx_type) + if idx_func: + idx = idx_func(nentries) + # but we need to fill in the name + if names: + idx.name = names[0] + return idx + elif idx_type is not None: + raise ValueError('"{idx_type}" is not a legal value for `idx_type`, ' + 'use "i"/"f"/"s"/"u"/"dt/"p"/"td".' 
+ .format(idx_type=idx_type)) + + if len(ndupe_l) < nlevels: + ndupe_l.extend([1] * (nlevels - len(ndupe_l))) + assert len(ndupe_l) == nlevels + + assert all(x > 0 for x in ndupe_l) + + tuples = [] + for i in range(nlevels): + def keyfunc(x): + import re + numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_") + return lmap(int, numeric_tuple) + + # build a list of lists to create the index from + div_factor = nentries // ndupe_l[i] + 1 + cnt = Counter() + for j in range(div_factor): + label = '{prefix}_l{i}_g{j}'.format(prefix=prefix, i=i, j=j) + cnt[label] = ndupe_l[i] + # cute Counter trick + result = list(sorted(cnt.elements(), key=keyfunc))[:nentries] + tuples.append(result) + + tuples = lzip(*tuples) + + # convert tuples to index + if nentries == 1: + # we have a single level of tuples, i.e. a regular Index + index = pd.Index(tuples[0], name=names[0]) + elif nlevels == 1: + name = None if names is None else names[0] + index = pd.Index((x[0] for x in tuples), name=name) + else: + index = pd.MultiIndex.from_tuples(tuples, names=names) + return index + + +def makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True, + c_idx_nlevels=1, r_idx_nlevels=1, data_gen_f=None, + c_ndupe_l=None, r_ndupe_l=None, dtype=None, + c_idx_type=None, r_idx_type=None): + """ + nrows, ncols - number of data rows/cols + c_idx_names, idx_names - False/True/list of strings, yields No names , + default names or uses the provided names for the levels of the + corresponding index. You can provide a single string when + c_idx_nlevels ==1. + c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex + r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex + data_gen_f - a function f(row,col) which return the data value + at that position, the default generator used yields values of the form + "RxCy" based on position. 
+ c_ndupe_l, r_ndupe_l - list of integers, determines the number + of duplicates for each label at a given level of the corresponding + index. The default `None` value produces a multiplicity of 1 across + all levels, i.e. a unique index. Will accept a partial list of length + N < idx_nlevels, for just the first N levels. If ndupe doesn't divide + nrows/ncol, the last label might have lower multiplicity. + dtype - passed to the DataFrame constructor as is, in case you wish to + have more control in conjuncion with a custom `data_gen_f` + r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td". + If idx_type is not None, `idx_nlevels` must be 1. + "i"/"f" creates an integer/float index, + "s"/"u" creates a string/unicode index + "dt" create a datetime index. + "td" create a timedelta index. + + if unspecified, string labels will be generated. + + Examples: + + # 5 row, 3 columns, default names on both, single index on both axis + >> makeCustomDataframe(5,3) + + # make the data a random int between 1 and 100 + >> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100)) + + # 2-level multiindex on rows with each label duplicated + # twice on first level, default names on both axis, single + # index on both axis + >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2]) + + # DatetimeIndex on row, index with unicode labels on columns + # no names on either axis + >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False, + r_idx_type="dt",c_idx_type="u") + + # 4-level multindex on rows with names provided, 2-level multindex + # on columns with default labels and default names. 
+ >> a=makeCustomDataframe(5,3,r_idx_nlevels=4, + r_idx_names=["FEE","FI","FO","FAM"], + c_idx_nlevels=2) + + >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4) + """ + + assert c_idx_nlevels > 0 + assert r_idx_nlevels > 0 + assert r_idx_type is None or (r_idx_type in ('i', 'f', 's', + 'u', 'dt', 'p', 'td') + and r_idx_nlevels == 1) + assert c_idx_type is None or (c_idx_type in ('i', 'f', 's', + 'u', 'dt', 'p', 'td') + and c_idx_nlevels == 1) + + columns = makeCustomIndex(ncols, nlevels=c_idx_nlevels, prefix='C', + names=c_idx_names, ndupe_l=c_ndupe_l, + idx_type=c_idx_type) + index = makeCustomIndex(nrows, nlevels=r_idx_nlevels, prefix='R', + names=r_idx_names, ndupe_l=r_ndupe_l, + idx_type=r_idx_type) + + # by default, generate data based on location + if data_gen_f is None: + data_gen_f = lambda r, c: "R{rows}C{cols}".format(rows=r, cols=c) + + data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)] + + return pd.DataFrame(data, index, columns, dtype=dtype) + + +def _create_missing_idx(nrows, ncols, density, random_state=None): + if random_state is None: + random_state = np.random + else: + random_state = np.random.RandomState(random_state) + + # below is cribbed from scipy.sparse + size = int(np.round((1 - density) * nrows * ncols)) + # generate a few more to ensure unique values + min_rows = 5 + fac = 1.02 + extra_size = min(size + min_rows, fac * size) + + def _gen_unique_rand(rng, _extra_size): + ind = rng.rand(int(_extra_size)) + return np.unique(np.floor(ind * nrows * ncols))[:size] + + ind = _gen_unique_rand(random_state, extra_size) + while ind.size < size: + extra_size *= 1.05 + ind = _gen_unique_rand(random_state, extra_size) + + j = np.floor(ind * 1. 
/ nrows).astype(int) + i = (ind - j * nrows).astype(int) + return i.tolist(), j.tolist() + + +def makeMissingCustomDataframe(nrows, ncols, density=.9, random_state=None, + c_idx_names=True, r_idx_names=True, + c_idx_nlevels=1, r_idx_nlevels=1, + data_gen_f=None, + c_ndupe_l=None, r_ndupe_l=None, dtype=None, + c_idx_type=None, r_idx_type=None): + """ + Parameters + ---------- + Density : float, optional + Float in (0, 1) that gives the percentage of non-missing numbers in + the DataFrame. + random_state : {np.random.RandomState, int}, optional + Random number generator or random seed. + + See makeCustomDataframe for descriptions of the rest of the parameters. + """ + df = makeCustomDataframe(nrows, ncols, c_idx_names=c_idx_names, + r_idx_names=r_idx_names, + c_idx_nlevels=c_idx_nlevels, + r_idx_nlevels=r_idx_nlevels, + data_gen_f=data_gen_f, + c_ndupe_l=c_ndupe_l, r_ndupe_l=r_ndupe_l, + dtype=dtype, c_idx_type=c_idx_type, + r_idx_type=r_idx_type) + + i, j = _create_missing_idx(nrows, ncols, density, random_state) + df.values[i, j] = np.nan + return df + + +def makeMissingDataframe(density=.9, random_state=None): + df = makeDataFrame() + i, j = _create_missing_idx(*df.shape, density=density, + random_state=random_state) + df.values[i, j] = np.nan + return df
In order to parametrize a bunch of tests that currently loop over e.g. `for obj in self.objs`, a bunch of `setup_class` stuff is going to have to go into `tm`. Instead of making that big file even bigger, this first splits it into three pieces.
https://api.github.com/repos/pandas-dev/pandas/pulls/24575
2019-01-03T00:17:02Z
2019-01-03T01:12:21Z
null
2019-09-17T18:57:49Z
CLN: Follow-ups to #24024
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 8d85b84ec7507..94d716a08d9dc 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -350,9 +350,6 @@ def unique(values): if is_extension_array_dtype(values): # Dispatch to extension dtype's unique. return values.unique() - elif is_datetime64tz_dtype(values): - # TODO: merge this check into the previous one following #24024 - return values.unique() original = values htable, _, values, dtype, ndtype = _get_hashtable_algo(values) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index d233e1d09a1e9..517c80619baea 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -47,10 +47,6 @@ def cmp_method(self, other): if isinstance(other, ABCDataFrame): return NotImplemented - if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries, cls)): - if other.ndim > 0 and len(self) != len(other): - raise ValueError('Lengths must match to compare') - if needs_i8_conversion(self) and needs_i8_conversion(other): # we may need to directly compare underlying # representations @@ -586,10 +582,6 @@ def view(self, dtype=None): # ------------------------------------------------------------------ # ExtensionArray Interface - # TODO: - # * _from_sequence - # * argsort / _values_for_argsort - # * _reduce def unique(self): result = unique1d(self.asi8) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index f42930929747d..ea2742c5808a3 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -280,8 +280,7 @@ def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False): ) raise ValueError(msg.format(values.dtype)) - dtype = pandas_dtype(dtype) - _validate_dt64_dtype(dtype) + dtype = _validate_dt64_dtype(dtype) if freq == "infer": msg = ( diff --git a/pandas/core/generic.py b/pandas/core/generic.py index e7c03de879e8a..3e782c6ef89e0 100644 --- a/pandas/core/generic.py +++ 
b/pandas/core/generic.py @@ -3082,7 +3082,7 @@ def _box_item_values(self, key, values): def _maybe_cache_changed(self, item, value): """The object has called back to us saying maybe it has changed. """ - self._data.set(item, value, check=False) + self._data.set(item, value) @property def _is_cached(self): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index a26daba49f5d1..c702eae5da012 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -68,8 +68,7 @@ def cmp_method(self, other): if other.ndim > 0 and len(self) != len(other): raise ValueError('Lengths must match to compare') - from .multi import MultiIndex - if is_object_dtype(self) and not isinstance(self, MultiIndex): + if is_object_dtype(self) and not isinstance(self, ABCMultiIndex): # don't pass MultiIndex with np.errstate(all='ignore'): result = ops._comp_method_OBJECT_ARRAY(op, self.values, other) @@ -1307,8 +1306,7 @@ def set_names(self, names, level=None, inplace=False): names=['species', 'year']) """ - from .multi import MultiIndex - if level is not None and not isinstance(self, MultiIndex): + if level is not None and not isinstance(self, ABCMultiIndex): raise ValueError('Level must be None for non-MultiIndex') if level is not None and not is_list_like(level) and is_list_like( @@ -3145,9 +3143,8 @@ def _reindex_non_unique(self, target): @Appender(_index_shared_docs['join']) def join(self, other, how='left', level=None, return_indexers=False, sort=False): - from .multi import MultiIndex - self_is_mi = isinstance(self, MultiIndex) - other_is_mi = isinstance(other, MultiIndex) + self_is_mi = isinstance(self, ABCMultiIndex) + other_is_mi = isinstance(other, ABCMultiIndex) # try to figure out the join level # GH3662 @@ -4394,8 +4391,7 @@ def groupby(self, values): # TODO: if we are a MultiIndex, we can do better # that converting to tuples - from .multi import MultiIndex - if isinstance(values, MultiIndex): + if isinstance(values, ABCMultiIndex): values = 
values.values values = ensure_categorical(values) result = values._reverse_indexer() diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index daca4b5116027..5547266ea6bab 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -31,23 +31,24 @@ _index_doc_kwargs = dict(ibase._index_doc_kwargs) -def ea_passthrough(name): +def ea_passthrough(array_method): """ Make an alias for a method of the underlying ExtensionArray. Parameters ---------- - name : str + array_method : method on an Array class Returns ------- method """ + def method(self, *args, **kwargs): - return getattr(self._eadata, name)(*args, **kwargs) + return array_method(self._data, *args, **kwargs) - method.__name__ = name - # TODO: docstrings + method.__name__ = array_method.__name__ + method.__doc__ = array_method.__doc__ return method @@ -67,9 +68,10 @@ class DatetimeIndexOpsMixin(ExtensionOpsMixin): _resolution = cache_readonly(DatetimeLikeArrayMixin._resolution.fget) resolution = cache_readonly(DatetimeLikeArrayMixin.resolution.fget) - _box_values = ea_passthrough("_box_values") - _maybe_mask_results = ea_passthrough("_maybe_mask_results") - __iter__ = ea_passthrough("__iter__") + _box_values = ea_passthrough(DatetimeLikeArrayMixin._box_values) + _maybe_mask_results = ea_passthrough( + DatetimeLikeArrayMixin._maybe_mask_results) + __iter__ = ea_passthrough(DatetimeLikeArrayMixin.__iter__) @property def _eadata(self): @@ -275,9 +277,6 @@ def sort_values(self, return_indexer=False, ascending=True): if not ascending: sorted_values = sorted_values[::-1] - sorted_values = self._maybe_box_as_values(sorted_values, - **attribs) - return self._simple_new(sorted_values, **attribs) @Appender(_index_shared_docs['take'] % _index_doc_kwargs) @@ -613,14 +612,6 @@ def _concat_same_dtype(self, to_concat, name): new_data = type(self._values)._concat_same_type(to_concat).asi8 return self._simple_new(new_data, **attribs) - def 
_maybe_box_as_values(self, values, **attribs): - # TODO(DatetimeArray): remove - # This is a temporary shim while PeriodArray is an ExtensoinArray, - # but others are not. When everyone is an ExtensionArray, this can - # be removed. Currently used in - # - sort_values - return values - @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): if is_dtype_equal(self.dtype, dtype) and copy is False: diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index a6a910f66359c..6d9829d4ef659 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -356,36 +356,6 @@ def tz(self, value): tzinfo = tz - @property - def size(self): - # TODO: Remove this when we have a DatetimeTZArray - # Necessary to avoid recursion error since DTI._values is a DTI - # for TZ-aware - return self._ndarray_values.size - - @property - def shape(self): - # TODO: Remove this when we have a DatetimeTZArray - # Necessary to avoid recursion error since DTI._values is a DTI - # for TZ-aware - return self._ndarray_values.shape - - @property - def nbytes(self): - # TODO: Remove this when we have a DatetimeTZArray - # Necessary to avoid recursion error since DTI._values is a DTI - # for TZ-aware - return self._ndarray_values.nbytes - - def memory_usage(self, deep=False): - # TODO: Remove this when we have a DatetimeTZArray - # Necessary to avoid recursion error since DTI._values is a DTI - # for TZ-aware - result = self._ndarray_values.nbytes - # include our engine hashtable - result += self._engine.sizeof(deep=deep) - return result - @cache_readonly def _is_dates_only(self): """Return a boolean if we are only dates (and don't have a timezone)""" @@ -455,11 +425,11 @@ def _mpl_repr(self): def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs): from pandas.io.formats.format import _get_format_datetime64_from_values - format = _get_format_datetime64_from_values(self, date_format) + fmt = 
_get_format_datetime64_from_values(self, date_format) return libts.format_array_from_datetime(self.asi8, tz=self.tz, - format=format, + format=fmt, na_rep=na_rep) @property @@ -1142,9 +1112,9 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None): is_normalized = cache_readonly(DatetimeArray.is_normalized.fget) _resolution = cache_readonly(DatetimeArray._resolution.fget) - strftime = ea_passthrough("strftime") - _has_same_tz = ea_passthrough("_has_same_tz") - __array__ = ea_passthrough("__array__") + strftime = ea_passthrough(DatetimeArray.strftime) + _has_same_tz = ea_passthrough(DatetimeArray._has_same_tz) + __array__ = ea_passthrough(DatetimeArray.__array__) @property def offset(self): diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 60059d5a43440..253ce2a28d165 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1468,9 +1468,9 @@ def to_frame(self, index=True, name=None): # Guarantee resulting column order result = DataFrame( OrderedDict([ - ((level if name is None else name), + ((level if lvlname is None else lvlname), self._get_level_values(level)) - for name, level in zip(idx_names, range(len(self.levels))) + for lvlname, level in zip(idx_names, range(len(self.levels))) ]), copy=False ) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 5bc76ed210edb..0eeb7551db26f 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -357,17 +357,6 @@ def func(x): return Period._from_ordinal(ordinal=x, freq=self.freq) return func - def _maybe_box_as_values(self, values, **attribs): - """Box an array of ordinals to a PeriodArray - - This is purely for compatibility between PeriodIndex - and Datetime/TimedeltaIndex. 
Once these are all backed by - an ExtensionArray, this can be removed - """ - # TODO(DatetimeArray): remove - freq = attribs['freq'] - return PeriodArray(values, freq=freq) - def _maybe_convert_timedelta(self, other): """ Convert timedelta-like input to an integer multiple of self.freq diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 3a3b9ed97c8fe..241d12dd06159 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -303,11 +303,6 @@ def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs): _is_monotonic_decreasing = Index.is_monotonic_decreasing _is_unique = Index.is_unique - _create_comparison_method = DatetimeIndexOpsMixin._create_comparison_method - # TODO: make sure we have a test for name retention analogous - # to series.test_arithmetic.test_ser_cmp_result_names; - # also for PeriodIndex which I think may be missing one - @property def _box_func(self): return lambda x: Timedelta(x, unit='ns') diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 7845a62bb7edb..5ce5ae7186774 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -338,7 +338,7 @@ def concat_same_type(self, to_concat, placement=None): def iget(self, i): return self.values[i] - def set(self, locs, values, check=False): + def set(self, locs, values): """ Modify Block in-place with new item value @@ -2416,7 +2416,7 @@ def f(m, v, i): return blocks - def set(self, locs, values, check=False): + def set(self, locs, values): """ Modify Block in-place with new item value @@ -2424,14 +2424,6 @@ def set(self, locs, values, check=False): ------- None """ - - # GH6026 - if check: - try: - if (self.values[locs] == values).all(): - return - except (IndexError, ValueError): - pass try: self.values[locs] = values except (ValueError): @@ -2902,7 +2894,7 @@ def should_store(self, value): not is_datetime64tz_dtype(value) and not is_extension_array_dtype(value)) - 
def set(self, locs, values, check=False): + def set(self, locs, values): """ Modify Block in-place with new item value @@ -3053,8 +3045,7 @@ def _try_coerce_args(self, values, other): elif (is_null_datelike_scalar(other) or (lib.is_scalar(other) and isna(other))): other = tslibs.iNaT - elif isinstance(other, (self._holder, DatetimeArray)): - # TODO: DatetimeArray check will be redundant after GH#24024 + elif isinstance(other, self._holder): if other.tz != self.values.tz: raise ValueError("incompatible or non tz-aware value") other = _block_shape(other.asi8, ndim=self.ndim) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index d50f9c3e65ebd..eba49d18431ef 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1009,11 +1009,10 @@ def delete(self, item): self._shape = None self._rebuild_blknos_and_blklocs() - def set(self, item, value, check=False): + def set(self, item, value): """ Set new item in-place. Does not consolidate. 
Adds new Block if not contained in the current set of items - if check, then validate that we are not setting the same data in-place """ # FIXME: refactor, clearly separate broadcasting & zip-like assignment # can prob also fix the various if tests for sparse/categorical @@ -1065,7 +1064,7 @@ def value_getitem(placement): blk = self.blocks[blkno] blk_locs = blklocs[val_locs.indexer] if blk.should_store(value): - blk.set(blk_locs, value_getitem(val_locs), check=check) + blk.set(blk_locs, value_getitem(val_locs)) else: unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) unfit_val_locs.append(val_locs) diff --git a/pandas/io/packers.py b/pandas/io/packers.py index 1e41369b00811..e6d18d5d4193a 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -584,23 +584,23 @@ def decode(obj): dtype = dtype_for(obj[u'dtype']) data = unconvert(obj[u'data'], dtype, obj.get(u'compress')) - return globals()[obj[u'klass']](data, dtype=dtype, name=obj[u'name']) + return Index(data, dtype=dtype, name=obj[u'name']) elif typ == u'range_index': - return globals()[obj[u'klass']](obj[u'start'], - obj[u'stop'], - obj[u'step'], - name=obj[u'name']) + return RangeIndex(obj[u'start'], + obj[u'stop'], + obj[u'step'], + name=obj[u'name']) elif typ == u'multi_index': dtype = dtype_for(obj[u'dtype']) data = unconvert(obj[u'data'], dtype, obj.get(u'compress')) data = [tuple(x) for x in data] - return globals()[obj[u'klass']].from_tuples(data, names=obj[u'names']) + return MultiIndex.from_tuples(data, names=obj[u'names']) elif typ == u'period_index': data = unconvert(obj[u'data'], np.int64, obj.get(u'compress')) d = dict(name=obj[u'name'], freq=obj[u'freq']) freq = d.pop('freq', None) - return globals()[obj[u'klass']](PeriodArray(data, freq), **d) + return PeriodIndex(PeriodArray(data, freq), **d) elif typ == u'datetime_index': data = unconvert(obj[u'data'], np.int64, obj.get(u'compress')) @@ -631,11 +631,10 @@ def decode(obj): pd_dtype = pandas_dtype(dtype) index = obj[u'index'] - result 
= globals()[obj[u'klass']](unconvert(obj[u'data'], dtype, - obj[u'compress']), - index=index, - dtype=pd_dtype, - name=obj[u'name']) + result = Series(unconvert(obj[u'data'], dtype, obj[u'compress']), + index=index, + dtype=pd_dtype, + name=obj[u'name']) return result elif typ == u'block_manager': @@ -671,18 +670,18 @@ def create_block(b): return np.timedelta64(int(obj[u'data'])) # elif typ == 'sparse_series': # dtype = dtype_for(obj['dtype']) - # return globals()[obj['klass']]( + # return SparseSeries( # unconvert(obj['sp_values'], dtype, obj['compress']), # sparse_index=obj['sp_index'], index=obj['index'], # fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name']) # elif typ == 'sparse_dataframe': - # return globals()[obj['klass']]( + # return SparseDataFrame( # obj['data'], columns=obj['columns'], # default_fill_value=obj['default_fill_value'], # default_kind=obj['default_kind'] # ) # elif typ == 'sparse_panel': - # return globals()[obj['klass']]( + # return SparsePanel( # obj['data'], items=obj['items'], # default_fill_value=obj['default_fill_value'], # default_kind=obj['default_kind'])
grepped for TODO in the affected files and took care of the easy ones. Addresses a flake8 complaint in MultiIndex (no idea why it is only showing up locally)
https://api.github.com/repos/pandas-dev/pandas/pulls/24573
2019-01-02T22:09:22Z
2019-01-03T00:12:44Z
2019-01-03T00:12:44Z
2019-01-03T00:14:40Z
BUG: TypeError with to_html(sparsify=False) and max_cols < len(columns)
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 7628c53cefa06..826c5a795f886 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1604,6 +1604,7 @@ Notice how we now instead output ``np.nan`` itself instead of a stringified form - Bug in :func:`to_html()` with ``index=False`` when both columns and row index are ``MultiIndex`` (:issue:`22579`) - Bug in :func:`to_html()` with ``index_names=False`` displaying index name (:issue:`22747`) - Bug in :func:`to_html()` with ``header=False`` not displaying row index names (:issue:`23788`) +- Bug in :func:`to_html()` with ``sparsify=False`` that caused it to raise ``TypeError`` (:issue:`22887`) - Bug in :func:`DataFrame.to_string()` that broke column alignment when ``index=False`` and width of first column's values is greater than the width of first column's header (:issue:`16839`, :issue:`13032`) - Bug in :func:`DataFrame.to_string()` that caused representations of :class:`DataFrame` to not take up the whole window (:issue:`22984`) - Bug in :func:`DataFrame.to_csv` where a single level MultiIndex incorrectly wrote a tuple. Now just the value of the index is written (:issue:`19589`). 
diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index 58f5364f2b523..390c3f3d5c709 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -241,7 +241,7 @@ def _write_col_header(self, indent): # GH3547 sentinel = com.sentinel_factory() else: - sentinel = None + sentinel = False levels = self.columns.format(sparsify=sentinel, adjoin=False, names=False) level_lengths = get_level_lengths(levels, sentinel) @@ -440,9 +440,6 @@ def _write_hierarchical_rows(self, fmt_values, indent): truncate_v = self.fmt.truncate_v frame = self.fmt.tr_frame nrows = len(frame) - # TODO: after gh-22887 fixed, refactor to use class property - # in place of row_levels - row_levels = self.frame.index.nlevels idx_values = frame.index.format(sparsify=False, adjoin=False, names=False) @@ -520,18 +517,24 @@ def _write_hierarchical_rows(self, fmt_values, indent): row.extend(fmt_values[j][i] for j in range(self.ncols)) if truncate_h: - row.insert(row_levels - sparse_offset + + row.insert(self.row_levels - sparse_offset + self.fmt.tr_col_num, '...') self.write_tr(row, indent, self.indent_delta, tags=tags, nindex_levels=len(levels) - sparse_offset) else: + row = [] for i in range(len(frame)): + if truncate_v and i == (self.fmt.tr_row_num): + str_sep_row = ['...'] * len(row) + self.write_tr(str_sep_row, indent, self.indent_delta, + tags=None, nindex_levels=self.row_levels) + idx_values = list(zip(*frame.index.format( sparsify=False, adjoin=False, names=False))) row = [] row.extend(idx_values[i]) row.extend(fmt_values[j][i] for j in range(self.ncols)) if truncate_h: - row.insert(row_levels + self.fmt.tr_col_num, '...') + row.insert(self.row_levels + self.fmt.tr_col_num, '...') self.write_tr(row, indent, self.indent_delta, tags=None, nindex_levels=frame.index.nlevels) diff --git a/pandas/tests/io/formats/data/html/truncate_multi_index_sparse_off.html b/pandas/tests/io/formats/data/html/truncate_multi_index_sparse_off.html index 05c644dfbfe08..6a7e1b5a59e3b 100644 --- 
a/pandas/tests/io/formats/data/html/truncate_multi_index_sparse_off.html +++ b/pandas/tests/io/formats/data/html/truncate_multi_index_sparse_off.html @@ -57,6 +57,17 @@ <td>NaN</td> <td>NaN</td> </tr> + <tr> + <th>...</th> + <th>...</th> + <td>...</td> + <td>...</td> + <td>...</td> + <td>...</td> + <td>...</td> + <td>...</td> + <td>...</td> + </tr> <tr> <th>foo</th> <th>two</th> diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py index d333330c19e39..889b903088afa 100644 --- a/pandas/tests/io/formats/test_to_html.py +++ b/pandas/tests/io/formats/test_to_html.py @@ -223,7 +223,6 @@ def test_to_html_truncate_multi_index(self, datapath): expected = expected_html(datapath, 'truncate_multi_index') assert result == expected - @pytest.mark.xfail(reason='GH22887 TypeError') def test_to_html_truncate_multi_index_sparse_off(self, datapath): arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
- [x] closes #22887 - [x] closes #11060 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24572
2019-01-02T21:25:03Z
2019-01-03T00:40:12Z
2019-01-03T00:40:12Z
2019-01-03T00:56:03Z
Added Datetime & Timedelta inference to array
diff --git a/pandas/core/arrays/array_.py b/pandas/core/arrays/array_.py index 173ed7d191ac9..4e84c62bce3d6 100644 --- a/pandas/core/arrays/array_.py +++ b/pandas/core/arrays/array_.py @@ -46,12 +46,14 @@ def array(data, # type: Sequence[object] Currently, pandas will infer an extension dtype for sequences of - ========================== ================================== - scalar type Array Type - ========================== ================================== - * :class:`pandas.Interval` :class:`pandas.IntervalArray` - * :class:`pandas.Period` :class:`pandas.arrays.PeriodArray` - ========================== ================================== + ============================== ===================================== + scalar type Array Type + ============================= ===================================== + * :class:`pandas.Interval` :class:`pandas.IntervalArray` + * :class:`pandas.Period` :class:`pandas.arrays.PeriodArray` + * :class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray` + * :class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray` + ============================= ===================================== For all other cases, NumPy's usual inference rules will be used. @@ -62,7 +64,8 @@ def array(data, # type: Sequence[object] Returns ------- - array : ExtensionArray + ExtensionArray + The newly created array. Raises ------ @@ -180,7 +183,9 @@ def array(data, # type: Sequence[object] ValueError: Cannot pass scalar '1' to 'pandas.array'. """ from pandas.core.arrays import ( - period_array, ExtensionArray, IntervalArray, PandasArray + period_array, ExtensionArray, IntervalArray, PandasArray, + DatetimeArrayMixin, + TimedeltaArrayMixin, ) from pandas.core.internals.arrays import extract_array @@ -220,7 +225,18 @@ def array(data, # type: Sequence[object] # We choose to return an ndarray, rather than raising. 
pass - # TODO(DatetimeArray): handle this type + elif inferred_dtype.startswith('datetime'): + # datetime, datetime64 + try: + return DatetimeArrayMixin._from_sequence(data, copy=copy) + except ValueError: + # Mixture of timezones, fall back to PandasArray + pass + + elif inferred_dtype.startswith('timedelta'): + # timedelta, timedelta64 + return TimedeltaArrayMixin._from_sequence(data, copy=copy) + # TODO(BooleanArray): handle this type result = PandasArray._from_sequence(data, dtype=dtype, copy=copy) diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py index 76ef85b0317ad..1d09a1f65e43f 100644 --- a/pandas/tests/arrays/test_array.py +++ b/pandas/tests/arrays/test_array.py @@ -1,7 +1,9 @@ +import datetime import decimal import numpy as np import pytest +import pytz from pandas.core.dtypes.dtypes import registry @@ -89,11 +91,51 @@ def test_array_copy(): assert np.shares_memory(a, b._ndarray) is True +cet = pytz.timezone("CET") + + @pytest.mark.parametrize('data, expected', [ + # period ([pd.Period("2000", "D"), pd.Period("2001", "D")], period_array(["2000", "2001"], freq="D")), + + # interval ([pd.Interval(0, 1), pd.Interval(1, 2)], pd.IntervalArray.from_breaks([0, 1, 2])), + + # datetime + ([pd.Timestamp('2000',), pd.Timestamp('2001')], + pd.arrays.DatetimeArray._from_sequence(['2000', '2001'])), + + ([datetime.datetime(2000, 1, 1), datetime.datetime(2001, 1, 1)], + pd.arrays.DatetimeArray._from_sequence(['2000', '2001'])), + + (np.array([1, 2], dtype='M8[ns]'), + pd.arrays.DatetimeArray(np.array([1, 2], dtype='M8[ns]'))), + + (np.array([1, 2], dtype='M8[us]'), + pd.arrays.DatetimeArray(np.array([1000, 2000], dtype='M8[ns]'))), + + # datetimetz + ([pd.Timestamp('2000', tz='CET'), pd.Timestamp('2001', tz='CET')], + pd.arrays.DatetimeArray._from_sequence( + ['2000', '2001'], dtype=pd.DatetimeTZDtype(tz='CET'))), + + ([datetime.datetime(2000, 1, 1, tzinfo=cet), + datetime.datetime(2001, 1, 1, tzinfo=cet)], + 
pd.arrays.DatetimeArray._from_sequence(['2000', '2001'], + tz=cet)), + + # timedelta + ([pd.Timedelta('1H'), pd.Timedelta('2H')], + pd.arrays.TimedeltaArray._from_sequence(['1H', '2H'])), + + (np.array([1, 2], dtype='m8[ns]'), + pd.arrays.TimedeltaArray(np.array([1, 2], dtype='m8[ns]'))), + + (np.array([1, 2], dtype='m8[us]'), + pd.arrays.TimedeltaArray(np.array([1000, 2000], dtype='m8[ns]'))), + ]) def test_array_inference(data, expected): result = pd.array(data) @@ -105,6 +147,15 @@ def test_array_inference(data, expected): [pd.Period("2000", "D"), pd.Period("2001", "A")], # mix of closed [pd.Interval(0, 1, closed='left'), pd.Interval(1, 2, closed='right')], + # Mix of timezones + [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2000", tz="UTC")], + # Mix of tz-aware and tz-naive + [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2000")], + # GH-24569 + pytest.param( + np.array([pd.Timestamp('2000'), pd.Timestamp('2000', tz='CET')]), + marks=pytest.mark.xfail(reason="bug in DTA._from_sequence") + ), ]) def test_array_inference_fails(data): result = pd.array(data)
Closes https://github.com/pandas-dev/pandas/issues/24568
https://api.github.com/repos/pandas-dev/pandas/pulls/24571
2019-01-02T20:10:35Z
2019-01-03T00:41:37Z
2019-01-03T00:41:37Z
2019-01-03T00:41:41Z
TST: isort tests/sparse
diff --git a/pandas/tests/sparse/frame/conftest.py b/pandas/tests/sparse/frame/conftest.py index f36b4e643d10b..3423260c1720a 100644 --- a/pandas/tests/sparse/frame/conftest.py +++ b/pandas/tests/sparse/frame/conftest.py @@ -1,8 +1,7 @@ -import pytest - import numpy as np +import pytest -from pandas import SparseDataFrame, SparseArray, DataFrame, bdate_range +from pandas import DataFrame, SparseArray, SparseDataFrame, bdate_range data = {'A': [np.nan, np.nan, np.nan, 0, 1, 2, 3, 4, 5, 6], 'B': [0, 1, 2, np.nan, np.nan, np.nan, 3, 4, 5, 6], diff --git a/pandas/tests/sparse/frame/test_analytics.py b/pandas/tests/sparse/frame/test_analytics.py index 2d9ccaa059a8c..95c1c8c453d0a 100644 --- a/pandas/tests/sparse/frame/test_analytics.py +++ b/pandas/tests/sparse/frame/test_analytics.py @@ -1,6 +1,7 @@ -import pytest import numpy as np -from pandas import SparseDataFrame, DataFrame, SparseSeries +import pytest + +from pandas import DataFrame, SparseDataFrame, SparseSeries from pandas.util import testing as tm diff --git a/pandas/tests/sparse/frame/test_apply.py b/pandas/tests/sparse/frame/test_apply.py index c26776ac4fd49..b5ea0a5c90e1a 100644 --- a/pandas/tests/sparse/frame/test_apply.py +++ b/pandas/tests/sparse/frame/test_apply.py @@ -1,8 +1,9 @@ -import pytest import numpy as np -from pandas import SparseDataFrame, DataFrame, Series, bdate_range -from pandas.core.sparse.api import SparseDtype +import pytest + +from pandas import DataFrame, Series, SparseDataFrame, bdate_range from pandas.core import nanops +from pandas.core.sparse.api import SparseDtype from pandas.util import testing as tm diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index 21100e3c3ffeb..f908c7b263dee 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -2,25 +2,24 @@ import operator -import pytest -from numpy import nan import numpy as np -import pandas as pd +from numpy import nan +import pytest -from 
pandas import Series, DataFrame, bdate_range, Panel +from pandas._libs.sparse import BlockIndex, IntIndex +from pandas.compat import lrange from pandas.errors import PerformanceWarning + +import pandas as pd +from pandas import DataFrame, Panel, Series, bdate_range, compat from pandas.core.indexes.datetimes import DatetimeIndex -from pandas.tseries.offsets import BDay -from pandas.util import testing as tm -from pandas.compat import lrange -from pandas import compat from pandas.core.sparse import frame as spf - -from pandas._libs.sparse import BlockIndex, IntIndex from pandas.core.sparse.api import ( - SparseSeries, SparseDataFrame, SparseArray, SparseDtype -) + SparseArray, SparseDataFrame, SparseDtype, SparseSeries) from pandas.tests.frame.test_api import SharedWithSparse +from pandas.util import testing as tm + +from pandas.tseries.offsets import BDay class TestSparseDataFrame(SharedWithSparse): diff --git a/pandas/tests/sparse/frame/test_indexing.py b/pandas/tests/sparse/frame/test_indexing.py index e4ca3b90ff8d0..2d2a7ac278dd6 100644 --- a/pandas/tests/sparse/frame/test_indexing.py +++ b/pandas/tests/sparse/frame/test_indexing.py @@ -1,8 +1,8 @@ -import pytest import numpy as np -from pandas import SparseDataFrame, DataFrame -from pandas.util import testing as tm +import pytest +from pandas import DataFrame, SparseDataFrame +from pandas.util import testing as tm pytestmark = pytest.mark.skip("Wrong SparseBlock initialization (GH 17386)") diff --git a/pandas/tests/sparse/frame/test_to_csv.py b/pandas/tests/sparse/frame/test_to_csv.py index b0243dfde8d3f..ed19872f8a7ef 100644 --- a/pandas/tests/sparse/frame/test_to_csv.py +++ b/pandas/tests/sparse/frame/test_to_csv.py @@ -1,5 +1,6 @@ import numpy as np import pytest + from pandas import SparseDataFrame, read_csv from pandas.util import testing as tm diff --git a/pandas/tests/sparse/frame/test_to_from_scipy.py b/pandas/tests/sparse/frame/test_to_from_scipy.py index e5c50e9574f90..bdb2cd022b451 100644 --- 
a/pandas/tests/sparse/frame/test_to_from_scipy.py +++ b/pandas/tests/sparse/frame/test_to_from_scipy.py @@ -1,13 +1,14 @@ -import pytest +from distutils.version import LooseVersion + import numpy as np +import pytest + +from pandas.core.dtypes.common import is_bool_dtype + import pandas as pd -from pandas.util import testing as tm from pandas import SparseDataFrame, SparseSeries from pandas.core.sparse.api import SparseDtype -from distutils.version import LooseVersion -from pandas.core.dtypes.common import ( - is_bool_dtype, -) +from pandas.util import testing as tm scipy = pytest.importorskip('scipy') ignore_matrix_warning = pytest.mark.filterwarnings( diff --git a/pandas/tests/sparse/series/test_indexing.py b/pandas/tests/sparse/series/test_indexing.py index 989cf3b974560..0f4235d7cc3fe 100644 --- a/pandas/tests/sparse/series/test_indexing.py +++ b/pandas/tests/sparse/series/test_indexing.py @@ -1,8 +1,8 @@ -import pytest import numpy as np -from pandas import SparseSeries, Series -from pandas.util import testing as tm +import pytest +from pandas import Series, SparseSeries +from pandas.util import testing as tm pytestmark = pytest.mark.skip("Wrong SparseBlock initialization (GH 17386)") diff --git a/pandas/tests/sparse/series/test_series.py b/pandas/tests/sparse/series/test_series.py index 225ef96581e72..7eed47d0de888 100644 --- a/pandas/tests/sparse/series/test_series.py +++ b/pandas/tests/sparse/series/test_series.py @@ -1,28 +1,26 @@ # pylint: disable-msg=E1101,W0612 -import operator from datetime import datetime +import operator -import pytest - -from numpy import nan import numpy as np -import pandas as pd - +from numpy import nan +import pytest -from pandas import Series, DataFrame, bdate_range, isna, compat +from pandas._libs.sparse import BlockIndex, IntIndex +from pandas.compat import PY36, range from pandas.errors import PerformanceWarning -from pandas.tseries.offsets import BDay -import pandas.util.testing as tm import pandas.util._test_decorators as 
td -from pandas.compat import range, PY36 -from pandas.core.reshape.util import cartesian_product +import pandas as pd +from pandas import ( + DataFrame, Series, SparseDtype, SparseSeries, bdate_range, compat, isna) +from pandas.core.reshape.util import cartesian_product import pandas.core.sparse.frame as spf - -from pandas._libs.sparse import BlockIndex, IntIndex -from pandas import SparseSeries, SparseDtype from pandas.tests.series.test_api import SharedWithSparse +import pandas.util.testing as tm + +from pandas.tseries.offsets import BDay def _test_data1(): diff --git a/pandas/tests/sparse/test_combine_concat.py b/pandas/tests/sparse/test_combine_concat.py index 92483f1e7511e..97d5aaca82778 100644 --- a/pandas/tests/sparse/test_combine_concat.py +++ b/pandas/tests/sparse/test_combine_concat.py @@ -1,11 +1,13 @@ # pylint: disable-msg=E1101,W0612 -import pytest +import itertools import numpy as np +import pytest + +from pandas.errors import PerformanceWarning + import pandas as pd import pandas.util.testing as tm -from pandas.errors import PerformanceWarning -import itertools class TestSparseArrayConcat(object): diff --git a/pandas/tests/sparse/test_format.py b/pandas/tests/sparse/test_format.py index 4186f579f62f5..63018f9525b1f 100644 --- a/pandas/tests/sparse/test_format.py +++ b/pandas/tests/sparse/test_format.py @@ -2,13 +2,12 @@ from __future__ import print_function import numpy as np -import pandas as pd -import pandas.util.testing as tm -from pandas.compat import (is_platform_windows, - is_platform_32bit) -from pandas.core.config import option_context +from pandas.compat import is_platform_32bit, is_platform_windows +import pandas as pd +from pandas.core.config import option_context +import pandas.util.testing as tm use_32bit_repr = is_platform_windows() or is_platform_32bit() diff --git a/pandas/tests/sparse/test_indexing.py b/pandas/tests/sparse/test_indexing.py index fb10473ec78a8..6d8c6f13cd32b 100644 --- a/pandas/tests/sparse/test_indexing.py +++ 
b/pandas/tests/sparse/test_indexing.py @@ -1,10 +1,11 @@ # pylint: disable-msg=E1101,W0612 -import pytest import numpy as np +import pytest + import pandas as pd -import pandas.util.testing as tm from pandas.core.sparse.api import SparseDtype +import pandas.util.testing as tm class TestSparseSeriesIndexing(object): diff --git a/pandas/tests/sparse/test_pivot.py b/pandas/tests/sparse/test_pivot.py index 0e71048f51177..af7de43ec0f8a 100644 --- a/pandas/tests/sparse/test_pivot.py +++ b/pandas/tests/sparse/test_pivot.py @@ -1,4 +1,5 @@ import numpy as np + import pandas as pd import pandas.util.testing as tm diff --git a/pandas/tests/sparse/test_reshape.py b/pandas/tests/sparse/test_reshape.py index d4ba672607982..6830e40ce6533 100644 --- a/pandas/tests/sparse/test_reshape.py +++ b/pandas/tests/sparse/test_reshape.py @@ -1,5 +1,5 @@ -import pytest import numpy as np +import pytest import pandas as pd import pandas.util.testing as tm diff --git a/setup.cfg b/setup.cfg index d4cdd57e7a448..032a41df90f83 100644 --- a/setup.cfg +++ b/setup.cfg @@ -145,23 +145,8 @@ skip= pandas/tests/plotting/common.py, pandas/tests/plotting/test_boxplot_method.py, pandas/tests/plotting/test_deprecated.py, - pandas/tests/sparse/test_indexing.py, pandas/tests/extension/test_sparse.py, pandas/tests/extension/base/reduce.py, - pandas/tests/sparse/test_reshape.py, - pandas/tests/sparse/test_pivot.py, - pandas/tests/sparse/test_format.py, - pandas/tests/sparse/test_groupby.py, - pandas/tests/sparse/test_combine_concat.py, - pandas/tests/sparse/series/test_indexing.py, - pandas/tests/sparse/series/test_series.py, - pandas/tests/sparse/frame/test_indexing.py, - pandas/tests/sparse/frame/test_to_from_scipy.py, - pandas/tests/sparse/frame/test_to_csv.py, - pandas/tests/sparse/frame/test_apply.py, - pandas/tests/sparse/frame/test_analytics.py, - pandas/tests/sparse/frame/test_frame.py, - pandas/tests/sparse/frame/conftest.py, pandas/tests/computation/test_compat.py, 
pandas/tests/computation/test_eval.py, pandas/types/common.py,
xref #23334
https://api.github.com/repos/pandas-dev/pandas/pulls/24563
2019-01-02T18:10:49Z
2019-01-03T00:45:43Z
2019-01-03T00:45:43Z
2019-01-03T00:54:58Z
See also description formatting
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index 3a522baaa92af..3147f36dcc835 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -389,8 +389,8 @@ cdef class Interval(IntervalMixin): See Also -------- - IntervalArray.overlaps : The corresponding method for IntervalArray - IntervalIndex.overlaps : The corresponding method for IntervalIndex + IntervalArray.overlaps : The corresponding method for IntervalArray. + IntervalIndex.overlaps : The corresponding method for IntervalIndex. Examples -------- diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 483b84940dbc8..2f4edb7de8f95 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1830,9 +1830,8 @@ cdef class _Period(object): See Also -------- - Period.dayofweek : Get the day of the week - - Period.dayofyear : Get the day of the year + Period.dayofweek : Get the day of the week. + Period.dayofyear : Get the day of the year. Examples -------- @@ -2189,8 +2188,8 @@ cdef class _Period(object): See Also -------- - Period.days_in_month : Return the days of the month - Period.dayofyear : Return the day of the year + Period.days_in_month : Return the days of the month. + Period.dayofyear : Return the day of the year. Examples -------- diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 20ac13ed0ef71..49187aad4f1eb 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -660,9 +660,9 @@ def str_match(arr, pat, case=True, flags=0, na=np.nan): See Also -------- - contains : analogous, but less strict, relying on re.search instead of - re.match - extract : extract matched groups + contains : Analogous, but less strict, relying on re.search instead of + re.match. + extract : Extract matched groups. 
""" if not case: flags |= re.IGNORECASE @@ -1255,13 +1255,13 @@ def str_pad(arr, width, side='left', fillchar=' '): See Also -------- - Series.str.rjust: Fills the left side of strings with an arbitrary + Series.str.rjust : Fills the left side of strings with an arbitrary character. Equivalent to ``Series.str.pad(side='left')``. - Series.str.ljust: Fills the right side of strings with an arbitrary + Series.str.ljust : Fills the right side of strings with an arbitrary character. Equivalent to ``Series.str.pad(side='right')``. - Series.str.center: Fills boths sides of strings with an arbitrary + Series.str.center : Fills boths sides of strings with an arbitrary character. Equivalent to ``Series.str.pad(side='both')``. - Series.str.zfill: Pad strings in the Series/Index by prepending '0' + Series.str.zfill : Pad strings in the Series/Index by prepending '0' character. Equivalent to ``Series.str.pad(side='left', fillchar='0')``. Examples @@ -2485,7 +2485,8 @@ def rsplit(self, pat=None, n=-1, expand=False): 'side': 'first', 'return': '3 elements containing the string itself, followed by two ' 'empty strings', - 'also': 'rpartition : Split the string at the last occurrence of `sep`' + 'also': 'rpartition : Split the string at the last occurrence of ' + '`sep`.' }) @deprecate_kwarg(old_arg_name='pat', new_arg_name='sep') def partition(self, sep=' ', expand=True): @@ -2497,7 +2498,8 @@ def partition(self, sep=' ', expand=True): 'side': 'last', 'return': '3 elements containing two empty strings, followed by the ' 'string itself', - 'also': 'partition : Split the string at the first occurrence of `sep`' + 'also': 'partition : Split the string at the first occurrence of ' + '`sep`.' 
}) @deprecate_kwarg(old_arg_name='pat', new_arg_name='sep') def rpartition(self, sep=' ', expand=True): @@ -2593,13 +2595,13 @@ def zfill(self, width): See Also -------- - Series.str.rjust: Fills the left side of strings with an arbitrary + Series.str.rjust : Fills the left side of strings with an arbitrary character. - Series.str.ljust: Fills the right side of strings with an arbitrary + Series.str.ljust : Fills the right side of strings with an arbitrary character. - Series.str.pad: Fills the specified sides of strings with an arbitrary + Series.str.pad : Fills the specified sides of strings with an arbitrary character. - Series.str.center: Fills boths sides of strings with an arbitrary + Series.str.center : Fills boths sides of strings with an arbitrary character. Notes @@ -2793,14 +2795,14 @@ def extractall(self, pat, flags=0): @Appender(_shared_docs['find'] % dict(side='lowest', method='find', - also='rfind : Return highest indexes in each strings')) + also='rfind : Return highest indexes in each strings.')) def find(self, sub, start=0, end=None): result = str_find(self._parent, sub, start=start, end=end, side='left') return self._wrap_result(result) @Appender(_shared_docs['find'] % dict(side='highest', method='rfind', - also='find : Return lowest indexes in each strings')) + also='find : Return lowest indexes in each strings.')) def rfind(self, sub, start=0, end=None): result = str_find(self._parent, sub, start=start, end=end, side='right') @@ -2852,7 +2854,7 @@ def normalize(self, form): @Appender(_shared_docs['index'] % dict(side='lowest', similar='find', method='index', - also='rindex : Return highest indexes in each strings')) + also='rindex : Return highest indexes in each strings.')) def index(self, sub, start=0, end=None): result = str_index(self._parent, sub, start=start, end=end, side='left') @@ -2860,7 +2862,7 @@ def index(self, sub, start=0, end=None): @Appender(_shared_docs['index'] % dict(side='highest', similar='rfind', method='rindex', - 
also='index : Return lowest indexes in each strings')) + also='index : Return lowest indexes in each strings.')) def rindex(self, sub, start=0, end=None): result = str_index(self._parent, sub, start=start, end=end, side='right')
- [x] xref #23630 DOC: Fix format of the See Also descriptions - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24561
2019-01-02T17:47:18Z
2019-01-04T01:10:15Z
2019-01-04T01:10:15Z
2019-01-04T01:20:17Z
REF: shift ravel in infer_dtype
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index d6e2b9a5288f5..1124000c97875 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -623,7 +623,7 @@ def clean_index_list(obj: list): return obj, all_arrays # don't force numpy coerce with nan's - inferred = infer_dtype(obj) + inferred = infer_dtype(obj, skipna=False) if inferred in ['string', 'bytes', 'unicode', 'mixed', 'mixed-integer']: return np.asarray(obj, dtype=object), 0 elif inferred in ['integer']: @@ -1210,6 +1210,10 @@ def infer_dtype(value: object, skipna: bool=False) -> str: values = construct_1d_object_array_from_listlike(value) values = getattr(values, 'values', values) + + # make contiguous + values = values.ravel() + if skipna: values = values[~isnaobj(values)] @@ -1220,9 +1224,6 @@ def infer_dtype(value: object, skipna: bool=False) -> str: if values.dtype != np.object_: values = values.astype('O') - # make contiguous - values = values.ravel() - n = len(values) if n == 0: return 'empty' diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 94d716a08d9dc..b473a7aef929e 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -165,7 +165,7 @@ def _ensure_arraylike(values): ensure that we are arraylike if not already """ if not is_array_like(values): - inferred = lib.infer_dtype(values) + inferred = lib.infer_dtype(values, skipna=False) if inferred in ['mixed', 'string', 'unicode']: if isinstance(values, tuple): values = list(values) @@ -202,8 +202,10 @@ def _get_hashtable_algo(values): if ndtype == 'object': - # its cheaper to use a String Hash Table than Object - if lib.infer_dtype(values) in ['string']: + # it's cheaper to use a String Hash Table than Object; we infer + # including nulls because that is the only difference between + # StringHashTable and ObjectHashtable + if lib.infer_dtype(values, skipna=False) in ['string']: ndtype = 'string' else: ndtype = 'object' @@ -220,8 +222,10 @@ def _get_data_algo(values, func_map): values, dtype, 
ndtype = _ensure_data(values) if ndtype == 'object': - # its cheaper to use a String Hash Table than Object - if lib.infer_dtype(values) in ['string']: + # it's cheaper to use a String Hash Table than Object; we infer + # including nulls because that is the only difference between + # StringHashTable and ObjectHashtable + if lib.infer_dtype(values, skipna=False) in ['string']: ndtype = 'string' f = func_map.get(ndtype, func_map['object']) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index ea2742c5808a3..281fbe14e48c5 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1652,7 +1652,7 @@ def sequence_to_dt64ns(data, dtype=None, copy=False, # TODO: We do not have tests specific to string-dtypes, # also complex or categorical or other extension copy = False - if lib.infer_dtype(data) == 'integer': + if lib.infer_dtype(data, skipna=False) == 'integer': data = data.astype(np.int64) else: # data comes back here as either i8 to denote UTC timestamps diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index eaec76b96a24d..af2c05bbee7c2 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -171,8 +171,8 @@ def coerce_to_array(values, dtype, mask=None, copy=False): values = np.array(values, copy=copy) if is_object_dtype(values): - inferred_type = lib.infer_dtype(values) - if inferred_type is 'mixed' and isna(values).all(): + inferred_type = lib.infer_dtype(values, skipna=True) + if inferred_type == 'empty': values = np.empty(len(values)) values.fill(np.nan) elif inferred_type not in ['floating', 'integer', diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index b747e2b6b096b..b4b6d64b95b56 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -594,7 +594,7 @@ def __floordiv__(self, other): elif is_object_dtype(other): result = [self[n] // other[n] for n in range(len(self))] result = 
np.array(result) - if lib.infer_dtype(result) == 'timedelta': + if lib.infer_dtype(result, skipna=False) == 'timedelta': result, _ = sequence_to_td64ns(result) return type(self)(result) return result diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 8f26f7ac209b1..b55bad46580fe 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -75,7 +75,8 @@ def trans(x): if isinstance(dtype, string_types): if dtype == 'infer': - inferred_type = lib.infer_dtype(ensure_object(result.ravel())) + inferred_type = lib.infer_dtype(ensure_object(result.ravel()), + skipna=False) if inferred_type == 'boolean': dtype = 'bool' elif inferred_type == 'integer': @@ -460,7 +461,7 @@ def infer_dtype_from_array(arr, pandas_dtype=False): return arr.dtype, np.asarray(arr) # don't force numpy coerce with nan's - inferred = lib.infer_dtype(arr) + inferred = lib.infer_dtype(arr, skipna=False) if inferred in ['string', 'bytes', 'unicode', 'mixed', 'mixed-integer']: return (np.object_, arr) @@ -941,10 +942,11 @@ def try_timedelta(v): # We have at least a NaT and a string # try timedelta first to avoid spurious datetime conversions - # e.g. '00:00:01' is a timedelta but - # technically is also a datetime + # e.g. 
'00:00:01' is a timedelta but technically is also a datetime value = try_timedelta(v) - if lib.infer_dtype(value) in ['mixed']: + if lib.infer_dtype(value, skipna=False) in ['mixed']: + # cannot skip missing values, as NaT implies that the string + # is actually a datetime value = try_datetime(v) return value diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 293ce7d8e4aca..b4c769fab88ad 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -703,7 +703,8 @@ def is_datetime_arraylike(arr): if isinstance(arr, ABCDatetimeIndex): return True elif isinstance(arr, (np.ndarray, ABCSeries)): - return arr.dtype == object and lib.infer_dtype(arr) == 'datetime' + return (is_object_dtype(arr.dtype) + and lib.infer_dtype(arr, skipna=False) == 'datetime') return getattr(arr, 'inferred_type', None) == 'datetime' diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 21ec14ace3e44..b22cb1050f140 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -474,7 +474,7 @@ def _infer_fill_value(val): if is_datetimelike(val): return np.array('NaT', dtype=val.dtype) elif is_object_dtype(val.dtype): - dtype = lib.infer_dtype(ensure_object(val)) + dtype = lib.infer_dtype(ensure_object(val), skipna=False) if dtype in ['datetime', 'datetime64']: return np.array('NaT', dtype=_NS_DTYPE) elif dtype in ['timedelta', 'timedelta64']: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index c702eae5da012..a7f2d4fad38de 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -346,7 +346,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, # should not be coerced # GH 11836 if is_integer_dtype(dtype): - inferred = lib.infer_dtype(data) + inferred = lib.infer_dtype(data, skipna=False) if inferred == 'integer': data = maybe_cast_to_integer_array(data, dtype, copy=copy) @@ -376,7 +376,7 @@ def __new__(cls, data=None, dtype=None, copy=False, 
name=None, else: data = data.astype(dtype) elif is_float_dtype(dtype): - inferred = lib.infer_dtype(data) + inferred = lib.infer_dtype(data, skipna=False) if inferred == 'string': pass else: @@ -414,7 +414,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, subarr = subarr.copy() if dtype is None: - inferred = lib.infer_dtype(subarr) + inferred = lib.infer_dtype(subarr, skipna=False) if inferred == 'integer': try: return cls._try_convert_to_int_index( @@ -1718,7 +1718,7 @@ def inferred_type(self): """ Return a string of the type inferred from the values. """ - return lib.infer_dtype(self) + return lib.infer_dtype(self, skipna=False) @cache_readonly def is_all_dates(self): diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 253ce2a28d165..8d26080a0361d 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2318,7 +2318,8 @@ def _partial_tup_index(self, tup, side='left'): section = labs[start:end] if lab not in lev: - if not lev.is_type_compatible(lib.infer_dtype([lab])): + if not lev.is_type_compatible(lib.infer_dtype([lab], + skipna=False)): raise TypeError('Level type mismatch: %s' % lab) # short circuit diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index b3c893c7d84be..62e7f64518bcc 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -667,7 +667,7 @@ def sanitize_array(data, index, dtype=None, copy=False, subarr = np.array(data, dtype=object, copy=copy) if is_object_dtype(subarr.dtype) and dtype != 'object': - inferred = lib.infer_dtype(subarr) + inferred = lib.infer_dtype(subarr, skipna=False) if inferred == 'period': try: subarr = period_array(subarr) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 130bc2b080c72..191cd5d63eea3 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -947,7 +947,8 @@ def _maybe_coerce_merge_keys(self): continue # let's 
infer and see if we are ok - elif lib.infer_dtype(lk) == lib.infer_dtype(rk): + elif (lib.infer_dtype(lk, skipna=False) + == lib.infer_dtype(rk, skipna=False)): continue # Check if we are trying to merge on obviously diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 21a93f7deec8b..6f95b14993228 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -416,7 +416,7 @@ def _convert_bin_to_numeric_type(bins, dtype): ------ ValueError if bins are not of a compat dtype to dtype """ - bins_dtype = infer_dtype(bins) + bins_dtype = infer_dtype(bins, skipna=False) if is_timedelta64_dtype(dtype): if bins_dtype in ['timedelta', 'timedelta64']: bins = to_timedelta(bins).view(np.int64) diff --git a/pandas/core/series.py b/pandas/core/series.py index 3637081e09f8c..52b60339a7d68 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -875,7 +875,7 @@ def _get_with(self, key): if isinstance(key, Index): key_type = key.inferred_type else: - key_type = lib.infer_dtype(key) + key_type = lib.infer_dtype(key, skipna=False) if key_type == 'integer': if self.index.is_integer() or self.index.is_floating(): @@ -1012,7 +1012,7 @@ def _set_with(self, key, value): if isinstance(key, Index): key_type = key.inferred_type else: - key_type = lib.infer_dtype(key) + key_type = lib.infer_dtype(key, skipna=False) if key_type == 'integer': if self.index.inferred_type == 'integer': diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index b34dfddcc66e1..ef69939d6e978 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -454,7 +454,7 @@ def sort_mixed(values): return np.concatenate([nums, np.asarray(strs, dtype=object)]) sorter = None - if PY3 and lib.infer_dtype(values) == 'mixed-integer': + if PY3 and lib.infer_dtype(values, skipna=False) == 'mixed-integer': # unorderable in py3 if mixed str/int ordered = sort_mixed(values) else: diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 2861f32e54e5e..5590e8f445c67 
100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -1300,7 +1300,7 @@ def _validate_usecols_arg(usecols): elif not is_list_like(usecols): raise ValueError(msg) else: - usecols_dtype = lib.infer_dtype(usecols) + usecols_dtype = lib.infer_dtype(usecols, skipna=False) if usecols_dtype not in ('empty', 'integer', 'string', 'unicode'): raise ValueError(msg) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index cec594a13b3d3..b115529f696b8 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1952,7 +1952,7 @@ def set_atom(self, block, block_items, existing_col, min_itemsize, return self.set_atom_complex(block) dtype = block.dtype.name - inferred_type = lib.infer_dtype(block.values) + inferred_type = lib.infer_dtype(block.values, skipna=False) if inferred_type == 'date': raise TypeError( @@ -1998,7 +1998,7 @@ def set_atom_string(self, block, block_items, existing_col, min_itemsize, data = block.values # see if we have a valid string type - inferred_type = lib.infer_dtype(data.ravel()) + inferred_type = lib.infer_dtype(data.ravel(), skipna=False) if inferred_type != 'string': # we cannot serialize this data, so report an exception on a column @@ -2006,7 +2006,7 @@ def set_atom_string(self, block, block_items, existing_col, min_itemsize, for i, item in enumerate(block_items): col = block.iget(i) - inferred_type = lib.infer_dtype(col.ravel()) + inferred_type = lib.infer_dtype(col.ravel(), skipna=False) if inferred_type != 'string': raise TypeError( "Cannot serialize the column [%s] because\n" @@ -2745,7 +2745,7 @@ def write_array(self, key, value, items=None): # infer the type, warn if we have a non-string type here (for # performance) - inferred_type = lib.infer_dtype(value.ravel()) + inferred_type = lib.infer_dtype(value.ravel(), skipna=False) if empty_array: pass elif inferred_type == 'string': @@ -4512,7 +4512,7 @@ def _convert_index(index, encoding=None, errors='strict', format_type=None): if isinstance(index, MultiIndex): raise 
TypeError('MultiIndex not supported here!') - inferred_type = lib.infer_dtype(index) + inferred_type = lib.infer_dtype(index, skipna=False) values = np.asarray(index) @@ -4745,7 +4745,7 @@ def __init__(self, table, where=None, start=None, stop=None): # see if we have a passed coordinate like try: - inferred = lib.infer_dtype(where) + inferred = lib.infer_dtype(where, skipna=False) if inferred == 'integer' or inferred == 'boolean': where = np.asarray(where) if where.dtype == np.bool_: diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 0eefa85211194..2f4093e154a95 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -857,27 +857,15 @@ def _harmonize_columns(self, parse_dates=None): except KeyError: pass # this column not in results - def _get_notna_col_dtype(self, col): - """ - Infer datatype of the Series col. In case the dtype of col is 'object' - and it contains NA values, this infers the datatype of the not-NA - values. Needed for inserting typed data containing NULLs, GH8778. - """ - col_for_inference = col - if col.dtype == 'object': - notnadata = col[~isna(col)] - if len(notnadata): - col_for_inference = notnadata - - return lib.infer_dtype(col_for_inference) - def _sqlalchemy_type(self, col): dtype = self.dtype or {} if col.name in dtype: return self.dtype[col.name] - col_type = self._get_notna_col_dtype(col) + # Infer type of column, while ignoring missing values. + # Needed for inserting typed data containing NULLs, GH 8778. + col_type = lib.infer_dtype(col, skipna=True) from sqlalchemy.types import (BigInteger, Integer, Float, Text, Boolean, @@ -1374,7 +1362,10 @@ def _sql_type_name(self, col): if col.name in dtype: return dtype[col.name] - col_type = self._get_notna_col_dtype(col) + # Infer type of column, while ignoring missing values. + # Needed for inserting typed data containing NULLs, GH 8778. 
+ col_type = lib.infer_dtype(col, skipna=True) + if col_type == 'timedelta64': warnings.warn("the 'timedelta' type is not supported, and will be " "written as integer values (ns frequency) to the " diff --git a/pandas/io/stata.py b/pandas/io/stata.py index fcd99e7cdce0d..aad57fc489fb6 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -396,7 +396,7 @@ def parse_dates_safe(dates, delta=False, year=False, days=False): to_datetime(d['year'], format='%Y').astype(np.int64)) d['days'] = days // NS_PER_DAY - elif infer_dtype(dates) == 'datetime': + elif infer_dtype(dates, skipna=False) == 'datetime': if delta: delta = dates.values - stata_epoch f = lambda x: \ @@ -1867,7 +1867,7 @@ def _dtype_to_default_stata_fmt(dtype, column, dta_version=114, if force_strl: return '%9s' if dtype.type == np.object_: - inferred_dtype = infer_dtype(column.dropna()) + inferred_dtype = infer_dtype(column, skipna=True) if not (inferred_dtype in ('string', 'unicode') or len(column) == 0): raise ValueError('Column `{col}` cannot be exported.\n\nOnly ' diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py index 8cab00fba3aa8..4c6b3b5132fec 100644 --- a/pandas/plotting/_converter.py +++ b/pandas/plotting/_converter.py @@ -246,7 +246,7 @@ def _convert_1d(values, units, axis): return values.asfreq(axis.freq)._ndarray_values elif isinstance(values, Index): return values.map(lambda x: get_datevalue(x, axis.freq)) - elif lib.infer_dtype(values) == 'period': + elif lib.infer_dtype(values, skipna=False) == 'period': # https://github.com/pandas-dev/pandas/issues/24304 # convert ndarray[period] -> PeriodIndex return PeriodIndex(values, freq=axis.freq)._ndarray_values diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index d9b1b0db90562..fff91991ee251 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -342,11 +342,11 @@ def test_infer_dtype_bytes(self): # string array of bytes arr = 
np.array(list('abc'), dtype='S1') - assert lib.infer_dtype(arr) == compare + assert lib.infer_dtype(arr, skipna=False) == compare # object array of bytes arr = arr.astype(object) - assert lib.infer_dtype(arr) == compare + assert lib.infer_dtype(arr, skipna=False) == compare # object array of bytes with missing values assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare @@ -530,87 +530,91 @@ def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype): # make sure the inferred dtype of the fixture is as requested assert inferred_dtype == lib.infer_dtype(values, skipna=True) - def test_length_zero(self): - result = lib.infer_dtype(np.array([], dtype='i4')) + @pytest.mark.parametrize('skipna', [True, False]) + def test_length_zero(self, skipna): + result = lib.infer_dtype(np.array([], dtype='i4'), skipna=skipna) assert result == 'integer' - result = lib.infer_dtype([]) + result = lib.infer_dtype([], skipna=skipna) assert result == 'empty' # GH 18004 arr = np.array([np.array([], dtype=object), np.array([], dtype=object)]) - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=skipna) assert result == 'empty' def test_integers(self): arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O') - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=False) assert result == 'integer' arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O') - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=False) assert result == 'mixed-integer' arr = np.array([1, 2, 3, 4, 5], dtype='i4') - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=False) assert result == 'integer' def test_bools(self): arr = np.array([True, False, True, True, True], dtype='O') - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=False) assert result == 'boolean' arr = np.array([np.bool_(True), np.bool_(False)], dtype='O') - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, 
skipna=False) assert result == 'boolean' arr = np.array([True, False, True, 'foo'], dtype='O') - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=False) assert result == 'mixed' arr = np.array([True, False, True], dtype=bool) - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=False) assert result == 'boolean' arr = np.array([True, np.nan, False], dtype='O') result = lib.infer_dtype(arr, skipna=True) assert result == 'boolean' + result = lib.infer_dtype(arr, skipna=False) + assert result == 'mixed' + def test_floats(self): arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O') - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=False) assert result == 'floating' arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'], dtype='O') - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=False) assert result == 'mixed-integer' arr = np.array([1, 2, 3, 4, 5], dtype='f4') - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=False) assert result == 'floating' arr = np.array([1, 2, 3, 4, 5], dtype='f8') - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=False) assert result == 'floating' def test_decimals(self): # GH15690 arr = np.array([Decimal(1), Decimal(2), Decimal(3)]) - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=False) assert result == 'decimal' arr = np.array([1.0, 2.0, Decimal(3)]) - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=False) assert result == 'mixed' arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)]) - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=False) assert result == 'decimal' arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O') - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=False) assert result == 'decimal' def test_string(self): @@ -618,7 +622,7 @@ def test_string(self): def test_unicode(self): arr = [u'a', 
np.nan, u'c'] - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=False) assert result == 'mixed' arr = [u'a', np.nan, u'c'] @@ -652,135 +656,135 @@ def test_infer_dtype_datetime(self): arr = np.array([Timestamp('2011-01-01'), Timestamp('2011-01-02')]) - assert lib.infer_dtype(arr) == 'datetime' + assert lib.infer_dtype(arr, skipna=False) == 'datetime' arr = np.array([np.datetime64('2011-01-01'), np.datetime64('2011-01-01')], dtype=object) - assert lib.infer_dtype(arr) == 'datetime64' + assert lib.infer_dtype(arr, skipna=False) == 'datetime64' arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)]) - assert lib.infer_dtype(arr) == 'datetime' + assert lib.infer_dtype(arr, skipna=False) == 'datetime' # starts with nan for n in [pd.NaT, np.nan]: arr = np.array([n, pd.Timestamp('2011-01-02')]) - assert lib.infer_dtype(arr) == 'datetime' + assert lib.infer_dtype(arr, skipna=False) == 'datetime' arr = np.array([n, np.datetime64('2011-01-02')]) - assert lib.infer_dtype(arr) == 'datetime64' + assert lib.infer_dtype(arr, skipna=False) == 'datetime64' arr = np.array([n, datetime(2011, 1, 1)]) - assert lib.infer_dtype(arr) == 'datetime' + assert lib.infer_dtype(arr, skipna=False) == 'datetime' arr = np.array([n, pd.Timestamp('2011-01-02'), n]) - assert lib.infer_dtype(arr) == 'datetime' + assert lib.infer_dtype(arr, skipna=False) == 'datetime' arr = np.array([n, np.datetime64('2011-01-02'), n]) - assert lib.infer_dtype(arr) == 'datetime64' + assert lib.infer_dtype(arr, skipna=False) == 'datetime64' arr = np.array([n, datetime(2011, 1, 1), n]) - assert lib.infer_dtype(arr) == 'datetime' + assert lib.infer_dtype(arr, skipna=False) == 'datetime' # different type of nat arr = np.array([np.timedelta64('nat'), np.datetime64('2011-01-02')], dtype=object) - assert lib.infer_dtype(arr) == 'mixed' + assert lib.infer_dtype(arr, skipna=False) == 'mixed' arr = np.array([np.datetime64('2011-01-02'), np.timedelta64('nat')], dtype=object) - assert lib.infer_dtype(arr) 
== 'mixed' + assert lib.infer_dtype(arr, skipna=False) == 'mixed' # mixed datetime arr = np.array([datetime(2011, 1, 1), pd.Timestamp('2011-01-02')]) - assert lib.infer_dtype(arr) == 'datetime' + assert lib.infer_dtype(arr, skipna=False) == 'datetime' # should be datetime? arr = np.array([np.datetime64('2011-01-01'), pd.Timestamp('2011-01-02')]) - assert lib.infer_dtype(arr) == 'mixed' + assert lib.infer_dtype(arr, skipna=False) == 'mixed' arr = np.array([pd.Timestamp('2011-01-02'), np.datetime64('2011-01-01')]) - assert lib.infer_dtype(arr) == 'mixed' + assert lib.infer_dtype(arr, skipna=False) == 'mixed' arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1]) - assert lib.infer_dtype(arr) == 'mixed-integer' + assert lib.infer_dtype(arr, skipna=False) == 'mixed-integer' arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1]) - assert lib.infer_dtype(arr) == 'mixed' + assert lib.infer_dtype(arr, skipna=False) == 'mixed' arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')]) - assert lib.infer_dtype(arr) == 'mixed' + assert lib.infer_dtype(arr, skipna=False) == 'mixed' def test_infer_dtype_timedelta(self): arr = np.array([pd.Timedelta('1 days'), pd.Timedelta('2 days')]) - assert lib.infer_dtype(arr) == 'timedelta' + assert lib.infer_dtype(arr, skipna=False) == 'timedelta' arr = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D')], dtype=object) - assert lib.infer_dtype(arr) == 'timedelta' + assert lib.infer_dtype(arr, skipna=False) == 'timedelta' arr = np.array([timedelta(1), timedelta(2)]) - assert lib.infer_dtype(arr) == 'timedelta' + assert lib.infer_dtype(arr, skipna=False) == 'timedelta' # starts with nan for n in [pd.NaT, np.nan]: arr = np.array([n, Timedelta('1 days')]) - assert lib.infer_dtype(arr) == 'timedelta' + assert lib.infer_dtype(arr, skipna=False) == 'timedelta' arr = np.array([n, np.timedelta64(1, 'D')]) - assert lib.infer_dtype(arr) == 'timedelta' + assert lib.infer_dtype(arr, skipna=False) == 'timedelta' arr = np.array([n, 
timedelta(1)]) - assert lib.infer_dtype(arr) == 'timedelta' + assert lib.infer_dtype(arr, skipna=False) == 'timedelta' arr = np.array([n, pd.Timedelta('1 days'), n]) - assert lib.infer_dtype(arr) == 'timedelta' + assert lib.infer_dtype(arr, skipna=False) == 'timedelta' arr = np.array([n, np.timedelta64(1, 'D'), n]) - assert lib.infer_dtype(arr) == 'timedelta' + assert lib.infer_dtype(arr, skipna=False) == 'timedelta' arr = np.array([n, timedelta(1), n]) - assert lib.infer_dtype(arr) == 'timedelta' + assert lib.infer_dtype(arr, skipna=False) == 'timedelta' # different type of nat arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')], dtype=object) - assert lib.infer_dtype(arr) == 'mixed' + assert lib.infer_dtype(arr, skipna=False) == 'mixed' arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')], dtype=object) - assert lib.infer_dtype(arr) == 'mixed' + assert lib.infer_dtype(arr, skipna=False) == 'mixed' def test_infer_dtype_period(self): # GH 13664 arr = np.array([pd.Period('2011-01', freq='D'), pd.Period('2011-02', freq='D')]) - assert lib.infer_dtype(arr) == 'period' + assert lib.infer_dtype(arr, skipna=False) == 'period' arr = np.array([pd.Period('2011-01', freq='D'), pd.Period('2011-02', freq='M')]) - assert lib.infer_dtype(arr) == 'period' + assert lib.infer_dtype(arr, skipna=False) == 'period' # starts with nan for n in [pd.NaT, np.nan]: arr = np.array([n, pd.Period('2011-01', freq='D')]) - assert lib.infer_dtype(arr) == 'period' + assert lib.infer_dtype(arr, skipna=False) == 'period' arr = np.array([n, pd.Period('2011-01', freq='D'), n]) - assert lib.infer_dtype(arr) == 'period' + assert lib.infer_dtype(arr, skipna=False) == 'period' # different type of nat arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')], dtype=object) - assert lib.infer_dtype(arr) == 'mixed' + assert lib.infer_dtype(arr, skipna=False) == 'mixed' arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')], dtype=object) - assert 
lib.infer_dtype(arr) == 'mixed' + assert lib.infer_dtype(arr, skipna=False) == 'mixed' @pytest.mark.parametrize( "data", @@ -850,60 +854,62 @@ def test_infer_datetimelike_array_nan_nat_like(self, first, second, def test_infer_dtype_all_nan_nat_like(self): arr = np.array([np.nan, np.nan]) - assert lib.infer_dtype(arr) == 'floating' + assert lib.infer_dtype(arr, skipna=False) == 'floating' # nan and None mix are result in mixed arr = np.array([np.nan, np.nan, None]) - assert lib.infer_dtype(arr) == 'mixed' + assert lib.infer_dtype(arr, skipna=True) == 'empty' + assert lib.infer_dtype(arr, skipna=False) == 'mixed' arr = np.array([None, np.nan, np.nan]) - assert lib.infer_dtype(arr) == 'mixed' + assert lib.infer_dtype(arr, skipna=True) == 'empty' + assert lib.infer_dtype(arr, skipna=False) == 'mixed' # pd.NaT arr = np.array([pd.NaT]) - assert lib.infer_dtype(arr) == 'datetime' + assert lib.infer_dtype(arr, skipna=False) == 'datetime' arr = np.array([pd.NaT, np.nan]) - assert lib.infer_dtype(arr) == 'datetime' + assert lib.infer_dtype(arr, skipna=False) == 'datetime' arr = np.array([np.nan, pd.NaT]) - assert lib.infer_dtype(arr) == 'datetime' + assert lib.infer_dtype(arr, skipna=False) == 'datetime' arr = np.array([np.nan, pd.NaT, np.nan]) - assert lib.infer_dtype(arr) == 'datetime' + assert lib.infer_dtype(arr, skipna=False) == 'datetime' arr = np.array([None, pd.NaT, None]) - assert lib.infer_dtype(arr) == 'datetime' + assert lib.infer_dtype(arr, skipna=False) == 'datetime' # np.datetime64(nat) arr = np.array([np.datetime64('nat')]) - assert lib.infer_dtype(arr) == 'datetime64' + assert lib.infer_dtype(arr, skipna=False) == 'datetime64' for n in [np.nan, pd.NaT, None]: arr = np.array([n, np.datetime64('nat'), n]) - assert lib.infer_dtype(arr) == 'datetime64' + assert lib.infer_dtype(arr, skipna=False) == 'datetime64' arr = np.array([pd.NaT, n, np.datetime64('nat'), n]) - assert lib.infer_dtype(arr) == 'datetime64' + assert lib.infer_dtype(arr, skipna=False) == 
'datetime64' arr = np.array([np.timedelta64('nat')], dtype=object) - assert lib.infer_dtype(arr) == 'timedelta' + assert lib.infer_dtype(arr, skipna=False) == 'timedelta' for n in [np.nan, pd.NaT, None]: arr = np.array([n, np.timedelta64('nat'), n]) - assert lib.infer_dtype(arr) == 'timedelta' + assert lib.infer_dtype(arr, skipna=False) == 'timedelta' arr = np.array([pd.NaT, n, np.timedelta64('nat'), n]) - assert lib.infer_dtype(arr) == 'timedelta' + assert lib.infer_dtype(arr, skipna=False) == 'timedelta' # datetime / timedelta mixed arr = np.array([pd.NaT, np.datetime64('nat'), np.timedelta64('nat'), np.nan]) - assert lib.infer_dtype(arr) == 'mixed' + assert lib.infer_dtype(arr, skipna=False) == 'mixed' arr = np.array([np.timedelta64('nat'), np.datetime64('nat')], dtype=object) - assert lib.infer_dtype(arr) == 'mixed' + assert lib.infer_dtype(arr, skipna=False) == 'mixed' def test_is_datetimelike_array_all_nan_nat_like(self): arr = np.array([np.nan, pd.NaT, np.datetime64('nat')]) @@ -967,7 +973,7 @@ def test_date(self): assert index.inferred_type == 'date' dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan] - result = lib.infer_dtype(dates) + result = lib.infer_dtype(dates, skipna=False) assert result == 'mixed' result = lib.infer_dtype(dates, skipna=True) @@ -1011,8 +1017,10 @@ def test_object(self): # GH 7431 # cannot infer more than this as only a single element arr = np.array([None], dtype='O') - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=False) assert result == 'mixed' + result = lib.infer_dtype(arr, skipna=True) + assert result == 'empty' def test_to_object_array_width(self): # see gh-13320 @@ -1043,17 +1051,17 @@ def test_categorical(self): # GH 8974 from pandas import Categorical, Series arr = Categorical(list('abc')) - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=False) assert result == 'categorical' - result = lib.infer_dtype(Series(arr)) + result = lib.infer_dtype(Series(arr), skipna=False) 
assert result == 'categorical' arr = Categorical(list('abc'), categories=['cegfab'], ordered=True) - result = lib.infer_dtype(arr) + result = lib.infer_dtype(arr, skipna=False) assert result == 'categorical' - result = lib.infer_dtype(Series(arr)) + result = lib.infer_dtype(Series(arr), skipna=False) assert result == 'categorical' diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index f5a445e2cca9a..a9f78096f3cd1 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -806,12 +806,12 @@ def test_constructor_with_datetime_tz(self): s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'), pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')]) assert s.dtype == 'datetime64[ns, US/Pacific]' - assert lib.infer_dtype(s) == 'datetime64' + assert lib.infer_dtype(s, skipna=False) == 'datetime64' s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'), pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Eastern')]) assert s.dtype == 'object' - assert lib.infer_dtype(s) == 'datetime' + assert lib.infer_dtype(s, skipna=False) == 'datetime' # with all NaT s = Series(pd.NaT, index=[0, 1], dtype='datetime64[ns, US/Eastern]') diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index d4ea21632edf9..7cea3be03d1a7 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -155,7 +155,7 @@ def any_allowed_skipna_inferred_dtype(request): >>> import pandas._libs.lib as lib >>> >>> def test_something(any_allowed_skipna_inferred_dtype): - ... inferred_dtype, values = any_skipna_inferred_dtype + ... inferred_dtype, values = any_allowed_skipna_inferred_dtype ... # will pass ... assert lib.infer_dtype(values, skipna=True) == inferred_dtype """
Precursor to #24050 as requested from @jreback in review: https://github.com/pandas-dev/pandas/pull/24050/files#r243769223 / https://github.com/pandas-dev/pandas/pull/24050/files#r243769260
https://api.github.com/repos/pandas-dev/pandas/pulls/24560
2019-01-02T17:39:18Z
2019-01-03T01:46:55Z
2019-01-03T01:46:55Z
2019-01-03T09:56:46Z
REF/TST: mixed use of mock/monkeypatch
diff --git a/pandas/tests/io/test_gbq.py b/pandas/tests/io/test_gbq.py index 15f366e5e2e9e..d3569af8d7786 100644 --- a/pandas/tests/io/test_gbq.py +++ b/pandas/tests/io/test_gbq.py @@ -12,12 +12,6 @@ from pandas import DataFrame, compat import pandas.util.testing as tm -try: - from unittest import mock -except ImportError: - mock = pytest.importorskip("mock") - - api_exceptions = pytest.importorskip("google.api_core.exceptions") bigquery = pytest.importorskip("google.cloud.bigquery") service_account = pytest.importorskip("google.oauth2.service_account") @@ -104,8 +98,10 @@ def make_mixed_dataframe_v2(test_size): def test_read_gbq_without_dialect_warns_future_change(monkeypatch): # Default dialect is changing to standard SQL. See: # https://github.com/pydata/pandas-gbq/issues/195 - mock_read_gbq = mock.Mock() - mock_read_gbq.return_value = DataFrame([[1.0]]) + + def mock_read_gbq(*args, **kwargs): + return DataFrame([[1.0]]) + monkeypatch.setattr(pandas_gbq, 'read_gbq', mock_read_gbq) with tm.assert_produces_warning(FutureWarning): pd.read_gbq("SELECT 1")
doubt if this will work as don't have gbq configured locally. will need to check output of CI.
https://api.github.com/repos/pandas-dev/pandas/pulls/24557
2019-01-02T16:52:47Z
2019-01-05T14:50:26Z
2019-01-05T14:50:26Z
2019-01-05T21:03:12Z
POC: complete composition switchover
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index ab5621d857e89..303d3016c8ac2 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -414,6 +414,10 @@ def _formatter(self, boxed=False): # ---------------------------------------------------------------- # Array-Like / EA-Interface Methods + def ravel(self, method=None): + # no-op because we are always 1-D + return self + @property def nbytes(self): return self._data.nbytes diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 8b0565a36648f..413c67895342c 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -240,11 +240,14 @@ def _simple_new(cls, values, freq=None, tz=None): result._dtype = dtype return result - def __new__(cls, values, freq=None, tz=None, dtype=None, copy=False, - dayfirst=False, yearfirst=False, ambiguous='raise'): - return cls._from_sequence( + def __init__(self, values, freq=None, tz=None, dtype=None, copy=False, + dayfirst=False, yearfirst=False, ambiguous='raise'): + result = type(self)._from_sequence( values, freq=freq, tz=tz, dtype=dtype, copy=copy, dayfirst=dayfirst, yearfirst=yearfirst, ambiguous=ambiguous) + self._data = result._data + self._freq = result._freq + self._dtype = result._dtype @classmethod def _from_sequence(cls, data, dtype=None, copy=False, @@ -1571,6 +1574,10 @@ def sequence_to_dt64ns(data, dtype=None, copy=False, # i.e. DatetimeArray/Index inferred_freq = data.freq + if isinstance(data, ABCIndexClass): + # need to unpack this after extracking `freq` + data = data._values + # if dtype has an embedded tz, capture it tz = validate_tz_from_dtype(dtype, tz) @@ -1750,7 +1757,8 @@ def maybe_convert_dtype(data, copy): # GH#18664 preserve tz in going DTI->Categorical->DTI # TODO: cases where we need to do another pass through this func, # e.g. 
the categories are timedelta64s - data = data.categories.take(data.codes, fill_value=NaT) + # Accessing ._values ensures that we don't return a CategoricalIndex + data = data.categories.take(data.codes, fill_value=NaT)._values copy = False elif is_extension_type(data) and not is_datetime64tz_dtype(data): diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 78570be8dc07f..3f9045e1e826b 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -159,8 +159,11 @@ def _simple_new(cls, values, freq=None, dtype=_TD_DTYPE): result._freq = freq return result - def __new__(cls, values, freq=None, dtype=_TD_DTYPE, copy=False): - return cls._from_sequence(values, dtype=dtype, copy=copy, freq=freq) + def __init__(self, values, freq=None, dtype=_TD_DTYPE, copy=False): + result = type(self)._from_sequence(values, dtype=dtype, + copy=copy, freq=freq) + self._data = result._data + self._freq = result._freq @classmethod def _from_sequence(cls, data, dtype=_TD_DTYPE, copy=False, diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ab5e2a14c7783..3b963a0fdf409 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -78,6 +78,9 @@ from pandas.core import ops from pandas.core.accessor import CachedAccessor from pandas.core.arrays import Categorical, ExtensionArray +from pandas.core.arrays.datetimelike import ( + DatetimeLikeArrayMixin as DatetimeLikeArray +) from pandas.core.config import get_option from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import (Index, MultiIndex, ensure_index, @@ -4356,9 +4359,28 @@ def _maybe_casted_values(index, labels=None): values.fill(np.nan) else: values = values.take(labels) + + # TODO(https://github.com/pandas-dev/pandas/issues/24206) + # Push this into maybe_upcast_putmask? + # We can't pass EAs there right now. Looks a bit + # complicated. + # So we unbox the ndarray_values, op, re-box. 
+ values_type = type(values) + values_dtype = values.dtype + + if issubclass(values_type, DatetimeLikeArray): + # tests.frame.test_axis_select_reindex:test_reindex_nan + values = values._data + if mask.any(): values, changed = maybe_upcast_putmask( values, mask, np.nan) + + if issubclass(values_type, DatetimeLikeArray): + # pass as i8 to preserve tz for DatetimeArray case + values = values_type(values.view('i8'), + dtype=values_dtype) + return values new_index = ibase.default_index(len(new_obj)) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 50b2413167b32..9e899bc57dc70 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -71,6 +71,10 @@ class DatetimeIndexOpsMixin(ExtensionOpsMixin): _maybe_mask_results = ea_passthrough("_maybe_mask_results") __iter__ = ea_passthrough("__iter__") + @property + def _eadata(self): + return self._data + @property def freq(self): """ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 690a3db28fe83..951aaabf2112a 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -40,10 +40,6 @@ def _new_DatetimeIndex(cls, d): """ This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__ """ - # data are already in UTC - # so need to localize - tz = d.pop('tz', None) - if "data" in d and not isinstance(d["data"], DatetimeIndex): # Avoid need to verify integrity by calling simple_new directly data = d.pop("data") @@ -56,8 +52,6 @@ def _new_DatetimeIndex(cls, d): warnings.simplefilter("ignore") result = cls.__new__(cls, verify_integrity=False, **d) - if tz is not None: - result = result.tz_localize('UTC').tz_convert(tz) return result @@ -327,7 +321,7 @@ def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None): dtarr = DatetimeArray._simple_new(values, freq=freq, tz=tz) result = object.__new__(cls) - result._eadata = dtarr + result._data 
= dtarr result.name = name # For groupby perf. See note in indexes/base about _index_data result._index_data = result._data @@ -418,7 +412,7 @@ def __setstate__(self, state): np.ndarray.__setstate__(data, state) dtarr = DatetimeArray(data) - self._eadata = dtarr + self._data = dtarr self._reset_identity() else: @@ -1128,10 +1122,6 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None): # -------------------------------------------------------------------- # Wrapping DatetimeArray - @property - def _data(self): - return self._eadata._data - @property def tz(self): # GH#18595 diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 0798dd6eee0c9..4107c8bced77a 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -227,15 +227,16 @@ def _simple_new(cls, values, name=None, freq=None, dtype=_TD_DTYPE): # be timedelta64[ns] if present assert dtype == _TD_DTYPE - assert isinstance(values, np.ndarray), type(values) + assert isinstance(values, (np.ndarray, TimedeltaArray)), type(values) if values.dtype == 'i8': values = values.view('m8[ns]') assert values.dtype == 'm8[ns]', values.dtype freq = to_offset(freq) - tdarr = TimedeltaArray._simple_new(values, freq=freq) + tdarr = TimedeltaArray._simple_new(getattr(values, '_data', values), + freq=freq) result = object.__new__(cls) - result._eadata = tdarr + result._data = tdarr result.name = name # For groupby perf. 
See note in indexes/base about _index_data result._index_data = tdarr._data @@ -279,8 +280,9 @@ def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs): # Wrapping TimedeltaArray @property - def _data(self): - return self._eadata._data + def _values(self): + # Note: without this TimedeltaIndex union ops break + return self._data._data __mul__ = _make_wrapped_arith_op("__mul__") __rmul__ = _make_wrapped_arith_op("__rmul__") diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 346f56968c963..06fa64ba99ac0 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -35,7 +35,8 @@ import pandas.core.algorithms as algos from pandas.core.arrays import ( - Categorical, DatetimeArrayMixin as DatetimeArray, ExtensionArray) + Categorical, DatetimeArrayMixin as DatetimeArray, ExtensionArray, + TimedeltaArrayMixin as TimedeltaArray) from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.indexes.datetimes import DatetimeIndex @@ -2198,7 +2199,9 @@ class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): def __init__(self, values, placement, ndim=None): if values.dtype != _TD_DTYPE: values = conversion.ensure_timedelta64ns(values) - + if isinstance(values, TimedeltaArray): + values = values._data + assert isinstance(values, np.ndarray), type(values) super(TimeDeltaBlock, self).__init__(values, placement=placement, ndim=ndim) @@ -2299,6 +2302,9 @@ def to_native_types(self, slicer=None, na_rep=None, quoting=None, dtype=object) return rvalues + def external_values(self, dtype=None): + return np.asarray(self.values.astype("timedelta64[ns]", copy=False)) + class BoolBlock(NumericBlock): __slots__ = () diff --git a/pandas/tests/indexes/timedeltas/test_construction.py b/pandas/tests/indexes/timedeltas/test_construction.py index a4e925f6611f9..b9bbfaff06215 100644 --- a/pandas/tests/indexes/timedeltas/test_construction.py +++ 
b/pandas/tests/indexes/timedeltas/test_construction.py @@ -26,7 +26,7 @@ def test_int64_nocopy(self): # and copy=False arr = np.arange(10, dtype=np.int64) tdi = TimedeltaIndex(arr, copy=False) - assert tdi._data.base is arr + assert tdi._data._data.base is arr def test_infer_from_tdi(self): # GH#23539 diff --git a/pandas/tests/series/test_block_internals.py b/pandas/tests/series/test_block_internals.py index ccfb169cc2f8d..044fed92420ff 100644 --- a/pandas/tests/series/test_block_internals.py +++ b/pandas/tests/series/test_block_internals.py @@ -16,14 +16,14 @@ def test_setitem_invalidates_datetime_index_freq(self): ts = dti[1] ser = pd.Series(dti) assert ser._values is not dti - assert ser._values._data.base is not dti._data.base + assert ser._values._data.base is not dti._data._data.base assert dti.freq == 'D' ser.iloc[1] = pd.NaT assert ser._values.freq is None # check that the DatetimeIndex was not altered in place assert ser._values is not dti - assert ser._values._data.base is not dti._data.base + assert ser._values._data.base is not dti._data._data.base assert dti[1] == ts assert dti.freq == 'D' @@ -33,9 +33,10 @@ def test_dt64tz_setitem_does_not_mutate_dti(self): ts = dti[0] ser = pd.Series(dti) assert ser._values is not dti - assert ser._values._data.base is not dti._data.base + assert ser._values._data.base is not dti._data._data.base assert ser._data.blocks[0].values is not dti - assert ser._data.blocks[0].values._data.base is not dti._data.base + assert (ser._data.blocks[0].values._data._data.base + is not dti._data._data.base) ser[::3] = pd.NaT assert ser[0] is pd.NaT
NOT FOR MERGING. per @TomAugspurger 's request, we're done splitting off parts of #24024. So this is just a proof of concept to get a handle on what parts that are left in 24024 are required for which goals. This makes DTI._data a DTA and TDI._data a TDA, then goes through and changes stuff until the tests pass locally. With the exception of a change to make TimedeltaIndex.union work, the changes are pretty much all taken from 24024 with minimal edits.
https://api.github.com/repos/pandas-dev/pandas/pulls/24556
2019-01-02T16:26:33Z
2019-01-02T19:24:25Z
null
2019-01-02T19:33:10Z
read_sas catches own error #24548
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 826c5a795f886..3566d58f5c641 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1599,6 +1599,7 @@ Notice how we now instead output ``np.nan`` itself instead of a stringified form - :func:`read_sas()` will parse numbers in sas7bdat-files that have width less than 8 bytes correctly. (:issue:`21616`) - :func:`read_sas()` will correctly parse sas7bdat files with many columns (:issue:`22628`) - :func:`read_sas()` will correctly parse sas7bdat files with data page types having also bit 7 set (so page type is 128 + 256 = 384) (:issue:`16615`) +- Bug in :func:`read_sas()` in which an incorrect error was raised on an invalid file format. (:issue:`24548`) - Bug in :meth:`detect_client_encoding` where potential ``IOError`` goes unhandled when importing in a mod_wsgi process due to restricted access to stdout. (:issue:`21552`) - Bug in :func:`to_html()` with ``index=False`` misses truncation indicators (...) on truncated DataFrame (:issue:`15019`, :issue:`22783`) - Bug in :func:`to_html()` with ``index=False`` when both columns and row index are ``MultiIndex`` (:issue:`22579`) diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py index 2da3775d5a6a7..9fae0da670bec 100644 --- a/pandas/io/sas/sasreader.py +++ b/pandas/io/sas/sasreader.py @@ -16,8 +16,8 @@ def read_sas(filepath_or_buffer, format=None, index=None, encoding=None, filepath_or_buffer : string or file-like object Path to the SAS file. format : string {'xport', 'sas7bdat'} or None - If None, file format is inferred. If 'xport' or 'sas7bdat', - uses the corresponding format. + If None, file format is inferred from file extension. If 'xport' or + 'sas7bdat', uses the corresponding format. index : identifier of index column, defaults to None Identifier of column that should be used as index of the DataFrame. 
encoding : string, default is None @@ -39,16 +39,13 @@ def read_sas(filepath_or_buffer, format=None, index=None, encoding=None, filepath_or_buffer = _stringify_path(filepath_or_buffer) if not isinstance(filepath_or_buffer, compat.string_types): raise ValueError(buffer_error_msg) - try: - fname = filepath_or_buffer.lower() - if fname.endswith(".xpt"): - format = "xport" - elif fname.endswith(".sas7bdat"): - format = "sas7bdat" - else: - raise ValueError("unable to infer format of SAS file") - except ValueError: - pass + fname = filepath_or_buffer.lower() + if fname.endswith(".xpt"): + format = "xport" + elif fname.endswith(".sas7bdat"): + format = "sas7bdat" + else: + raise ValueError("unable to infer format of SAS file") if format.lower() == 'xport': from pandas.io.sas.sas_xport import XportReader diff --git a/pandas/tests/io/sas/test_sas.py b/pandas/tests/io/sas/test_sas.py index 0f6342aa62ac0..34bca1e5b74a1 100644 --- a/pandas/tests/io/sas/test_sas.py +++ b/pandas/tests/io/sas/test_sas.py @@ -3,6 +3,7 @@ from pandas.compat import StringIO from pandas import read_sas +import pandas.util.testing as tm class TestSas(object): @@ -15,3 +16,10 @@ def test_sas_buffer_format(self): "name, you must specify a format string") with pytest.raises(ValueError, match=msg): read_sas(b) + + def test_sas_read_no_format_or_extension(self): + # see gh-24548 + msg = ("unable to infer format of SAS file") + with tm.ensure_clean('test_file_no_extension') as path: + with pytest.raises(ValueError, match=msg): + read_sas(path)
- [ ] closes #24548 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24554
2019-01-02T15:47:13Z
2019-01-03T00:44:21Z
2019-01-03T00:44:21Z
2019-01-03T00:44:24Z
TST: isort tests/groupby
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index 52bfee66f94f8..62ec0555f9033 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -4,15 +4,15 @@ test .agg behavior / note that .apply is tested generally in test_groupby.py """ +import numpy as np import pytest -import numpy as np -import pandas as pd +from pandas.compat import OrderedDict -from pandas import concat, DataFrame, Index, MultiIndex, Series -from pandas.core.groupby.grouper import Grouping +import pandas as pd +from pandas import DataFrame, Index, MultiIndex, Series, concat from pandas.core.base import SpecificationError -from pandas.compat import OrderedDict +from pandas.core.groupby.grouper import Grouping import pandas.util.testing as tm diff --git a/pandas/tests/groupby/aggregate/test_cython.py b/pandas/tests/groupby/aggregate/test_cython.py index ad5968bca5c03..ad3974d5e2fb8 100644 --- a/pandas/tests/groupby/aggregate/test_cython.py +++ b/pandas/tests/groupby/aggregate/test_cython.py @@ -6,13 +6,12 @@ from __future__ import print_function +import numpy as np import pytest -import numpy as np import pandas as pd - -from pandas import (bdate_range, DataFrame, Index, Series, Timestamp, - Timedelta, NaT) +from pandas import ( + DataFrame, Index, NaT, Series, Timedelta, Timestamp, bdate_range) from pandas.core.groupby.groupby import DataError import pandas.util.testing as tm diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py index fca863b4d8eb0..b5214b11bddcc 100644 --- a/pandas/tests/groupby/aggregate/test_other.py +++ b/pandas/tests/groupby/aggregate/test_other.py @@ -6,22 +6,22 @@ from __future__ import print_function -import pytest from collections import OrderedDict - import datetime as dt from functools import partial import numpy as np -import pandas as pd +import pytest +import pandas as pd from pandas 
import ( - date_range, DataFrame, Index, MultiIndex, PeriodIndex, period_range, Series -) + DataFrame, Index, MultiIndex, PeriodIndex, Series, date_range, + period_range) from pandas.core.groupby.groupby import SpecificationError -from pandas.io.formats.printing import pprint_thing import pandas.util.testing as tm +from pandas.io.formats.printing import pprint_thing + def test_agg_api(): # GH 6337 diff --git a/pandas/tests/groupby/conftest.py b/pandas/tests/groupby/conftest.py index 657da422bf02c..cb4fe511651ee 100644 --- a/pandas/tests/groupby/conftest.py +++ b/pandas/tests/groupby/conftest.py @@ -1,6 +1,7 @@ -import pytest import numpy as np -from pandas import MultiIndex, DataFrame +import pytest + +from pandas import DataFrame, MultiIndex from pandas.util import testing as tm diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index 8366f75a5795e..659d1a9cf9813 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -1,9 +1,11 @@ -import pytest +from datetime import datetime + import numpy as np +import pytest + import pandas as pd -from datetime import datetime +from pandas import DataFrame, Index, MultiIndex, Series, bdate_range, compat from pandas.util import testing as tm -from pandas import DataFrame, MultiIndex, compat, Series, bdate_range, Index def test_apply_issues(): diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py index 9dcc13c15736f..f33df5fb0eb98 100644 --- a/pandas/tests/groupby/test_bin_groupby.py +++ b/pandas/tests/groupby/test_bin_groupby.py @@ -1,16 +1,17 @@ # -*- coding: utf-8 -*- +import numpy as np +from numpy import nan import pytest -from numpy import nan -import numpy as np +from pandas._libs import groupby, lib, reduction from pandas.core.dtypes.common import ensure_int64 + from pandas import Index, isna from pandas.core.groupby.ops import generate_bins_generic -from pandas.util.testing import assert_almost_equal import 
pandas.util.testing as tm -from pandas._libs import lib, groupby, reduction +from pandas.util.testing import assert_almost_equal def test_series_grouper(): diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index a39600d114b89..144b64025e1c0 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -1,17 +1,19 @@ # -*- coding: utf-8 -*- from __future__ import print_function + from datetime import datetime +import numpy as np import pytest -import numpy as np -import pandas as pd from pandas.compat import PY37 -from pandas import (Index, MultiIndex, CategoricalIndex, - DataFrame, Categorical, Series, qcut) -from pandas.util.testing import (assert_equal, - assert_frame_equal, assert_series_equal) + +import pandas as pd +from pandas import ( + Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut) import pandas.util.testing as tm +from pandas.util.testing import ( + assert_equal, assert_frame_equal, assert_series_equal) def cartesian_product_for_groupers(result, args, names): diff --git a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py index 8b9f3607d5c3e..1438de5b7e37c 100644 --- a/pandas/tests/groupby/test_counting.py +++ b/pandas/tests/groupby/test_counting.py @@ -4,10 +4,10 @@ import numpy as np import pytest -from pandas import (DataFrame, Series, MultiIndex, Timestamp, Timedelta, - Period) -from pandas.util.testing import (assert_series_equal, assert_frame_equal) -from pandas.compat import (range, product as cart_product) +from pandas.compat import product as cart_product, range + +from pandas import DataFrame, MultiIndex, Period, Series, Timedelta, Timestamp +from pandas.util.testing import assert_frame_equal, assert_series_equal class TestCounting(object): diff --git a/pandas/tests/groupby/test_filters.py b/pandas/tests/groupby/test_filters.py index 205b06c5b679f..8195d36b7bfe9 100644 --- 
a/pandas/tests/groupby/test_filters.py +++ b/pandas/tests/groupby/test_filters.py @@ -1,11 +1,12 @@ # -*- coding: utf-8 -*- from __future__ import print_function -import pytest import numpy as np -import pandas.util.testing as tm -from pandas import Timestamp, DataFrame, Series +import pytest + import pandas as pd +from pandas import DataFrame, Series, Timestamp +import pandas.util.testing as tm def test_filter_series(): diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 310a2fb1e609d..00714c3333bde 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -1,14 +1,16 @@ -import pytest +from string import ascii_lowercase import numpy as np -import pandas as pd -from pandas import (DataFrame, Index, compat, isna, - Series, MultiIndex, Timestamp, date_range) +import pytest + +from pandas.compat import product as cart_product from pandas.errors import UnsupportedFunctionCall -from pandas.util import testing as tm + +import pandas as pd +from pandas import ( + DataFrame, Index, MultiIndex, Series, Timestamp, compat, date_range, isna) import pandas.core.nanops as nanops -from string import ascii_lowercase -from pandas.compat import product as cart_product +from pandas.util import testing as tm @pytest.mark.parametrize("agg_func", ['any', 'all']) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index e9de46bba03f1..33cfb9a06a805 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1,26 +1,25 @@ # -*- coding: utf-8 -*- from __future__ import print_function -import pytest - +from collections import defaultdict from datetime import datetime from decimal import Decimal -from pandas import (date_range, Timestamp, - Index, MultiIndex, DataFrame, Series, - Panel, read_csv) -from pandas.errors import PerformanceWarning -from pandas.util.testing import (assert_frame_equal, - assert_series_equal, 
assert_almost_equal) -from pandas.compat import (range, lrange, StringIO, lmap, lzip, map, zip, - OrderedDict) -from pandas import compat -from collections import defaultdict -import pandas.core.common as com import numpy as np +import pytest + +from pandas.compat import ( + OrderedDict, StringIO, lmap, lrange, lzip, map, range, zip) +from pandas.errors import PerformanceWarning -import pandas.util.testing as tm import pandas as pd +from pandas import ( + DataFrame, Index, MultiIndex, Panel, Series, Timestamp, compat, date_range, + read_csv) +import pandas.core.common as com +import pandas.util.testing as tm +from pandas.util.testing import ( + assert_almost_equal, assert_frame_equal, assert_series_equal) def test_repr(): diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index bcf4f42d8ca5e..55d9cee0376f1 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -2,25 +2,25 @@ """ test where we are determining what we are grouping, or getting groups """ +import numpy as np import pytest -from pandas import (date_range, Timestamp, - Index, MultiIndex, DataFrame, Series, CategoricalIndex) -from pandas.util.testing import (assert_panel_equal, assert_frame_equal, - assert_series_equal, assert_almost_equal) -from pandas.core.groupby.grouper import Grouping -from pandas.compat import lrange, long +from pandas.compat import long, lrange -from pandas import compat -import numpy as np - -import pandas.util.testing as tm import pandas as pd - +from pandas import ( + CategoricalIndex, DataFrame, Index, MultiIndex, Series, Timestamp, compat, + date_range) +from pandas.core.groupby.grouper import Grouping +import pandas.util.testing as tm +from pandas.util.testing import ( + assert_almost_equal, assert_frame_equal, assert_panel_equal, + assert_series_equal) # selection # -------------------------------- + class TestSelection(object): def test_select_bad_cols(self): diff --git 
a/pandas/tests/groupby/test_index_as_string.py b/pandas/tests/groupby/test_index_as_string.py index 6afa63c31e3b6..141381f84300b 100644 --- a/pandas/tests/groupby/test_index_as_string.py +++ b/pandas/tests/groupby/test_index_as_string.py @@ -1,7 +1,7 @@ -import pytest -import pandas as pd import numpy as np +import pytest +import pandas as pd from pandas.util.testing import assert_frame_equal, assert_series_equal diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py index 4ea4b580a2c3f..255d9a8acf2d0 100644 --- a/pandas/tests/groupby/test_nth.py +++ b/pandas/tests/groupby/test_nth.py @@ -1,12 +1,12 @@ import numpy as np -import pandas as pd -from pandas import DataFrame, MultiIndex, Index, Series, isna, Timestamp +import pytest + from pandas.compat import lrange + +import pandas as pd +from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, isna from pandas.util.testing import ( - assert_frame_equal, - assert_produces_warning, - assert_series_equal) -import pytest + assert_frame_equal, assert_produces_warning, assert_series_equal) def test_first_last_nth(df): diff --git a/pandas/tests/groupby/test_rank.py b/pandas/tests/groupby/test_rank.py index e58e12ab83143..9b0396bb530a1 100644 --- a/pandas/tests/groupby/test_rank.py +++ b/pandas/tests/groupby/test_rank.py @@ -1,5 +1,6 @@ -import pytest import numpy as np +import pytest + import pandas as pd from pandas import DataFrame, Series, concat from pandas.util import testing as tm diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index cb7b419710837..a2f2c1392b251 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -1,17 +1,17 @@ """ test with the TimeGrouper / grouping with datetimes """ -import pytest -import pytz - from datetime import datetime + import numpy as np from numpy import nan +import pytest +import pytz + +from pandas.compat import StringIO import pandas as pd -from 
pandas import (DataFrame, date_range, Index, - Series, MultiIndex, Timestamp) +from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range from pandas.core.groupby.ops import BinGrouper -from pandas.compat import StringIO from pandas.util import testing as tm from pandas.util.testing import assert_frame_equal, assert_series_equal diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index b6361b4ad76a0..465ae67fd7318 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -1,19 +1,19 @@ """ test with the .transform """ +import numpy as np import pytest -import numpy as np -import pandas as pd -from pandas.util import testing as tm -from pandas import Series, DataFrame, Timestamp, MultiIndex, concat, date_range -from pandas.core.dtypes.common import ( - ensure_platform_int, is_timedelta64_dtype) -from pandas.compat import StringIO from pandas._libs import groupby +from pandas.compat import StringIO -from pandas.util.testing import assert_frame_equal, assert_series_equal -from pandas.core.groupby.groupby import DataError +from pandas.core.dtypes.common import ensure_platform_int, is_timedelta64_dtype + +import pandas as pd +from pandas import DataFrame, MultiIndex, Series, Timestamp, concat, date_range from pandas.core.config import option_context +from pandas.core.groupby.groupby import DataError +from pandas.util import testing as tm +from pandas.util.testing import assert_frame_equal, assert_series_equal def assert_fp_equal(a, b): diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/test_value_counts.py index 1434656115d18..2b5f87aa59a8d 100644 --- a/pandas/tests/groupby/test_value_counts.py +++ b/pandas/tests/groupby/test_value_counts.py @@ -4,13 +4,13 @@ and proper parameter handling """ -import pytest - from itertools import product + import numpy as np +import pytest +from pandas import DataFrame, MultiIndex, Series, date_range from 
pandas.util import testing as tm -from pandas import MultiIndex, DataFrame, Series, date_range # our starting frame diff --git a/pandas/tests/groupby/test_whitelist.py b/pandas/tests/groupby/test_whitelist.py index a451acebcdba4..b7302b3911e58 100644 --- a/pandas/tests/groupby/test_whitelist.py +++ b/pandas/tests/groupby/test_whitelist.py @@ -3,10 +3,12 @@ the so-called white/black lists """ -import pytest from string import ascii_lowercase + import numpy as np -from pandas import DataFrame, Series, compat, date_range, Index, MultiIndex +import pytest + +from pandas import DataFrame, Index, MultiIndex, Series, compat, date_range from pandas.util import testing as tm AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew', diff --git a/setup.cfg b/setup.cfg index 59e5991914ca6..0738eae9cfd6d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -153,25 +153,6 @@ skip= pandas/tests/arithmetic/conftest.py, pandas/tests/arithmetic/test_timedelta64.py, pandas/tests/internals/test_internals.py, - pandas/tests/groupby/test_value_counts.py, - pandas/tests/groupby/test_filters.py, - pandas/tests/groupby/test_nth.py, - pandas/tests/groupby/test_timegrouper.py, - pandas/tests/groupby/test_transform.py, - pandas/tests/groupby/test_bin_groupby.py, - pandas/tests/groupby/test_index_as_string.py, - pandas/tests/groupby/test_groupby.py, - pandas/tests/groupby/test_whitelist.py, - pandas/tests/groupby/test_function.py, - pandas/tests/groupby/test_apply.py, - pandas/tests/groupby/conftest.py, - pandas/tests/groupby/test_counting.py, - pandas/tests/groupby/test_categorical.py, - pandas/tests/groupby/test_grouping.py, - pandas/tests/groupby/test_rank.py, - pandas/tests/groupby/aggregate/test_cython.py, - pandas/tests/groupby/aggregate/test_other.py, - pandas/tests/groupby/aggregate/test_aggregate.py, pandas/tests/plotting/test_datetimelike.py, pandas/tests/plotting/test_series.py, pandas/tests/plotting/test_groupby.py,
xref #23334
https://api.github.com/repos/pandas-dev/pandas/pulls/24553
2019-01-02T14:30:03Z
2019-01-02T17:56:21Z
2019-01-02T17:56:21Z
2019-01-02T17:57:49Z
DOC: fix some doc build warnings/errors
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index 0c192a0aab24a..0f9726dc94816 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -1236,7 +1236,7 @@ the following Python code will read the binary file ``'binary.dat'`` into a pandas ``DataFrame``, where each element of the struct corresponds to a column in the frame: -.. ipython:: python +.. code-block:: python names = 'count', 'avg', 'scale' diff --git a/doc/source/integer_na.rst b/doc/source/integer_na.rst index befcf7016f155..eb0c5e3d05863 100644 --- a/doc/source/integer_na.rst +++ b/doc/source/integer_na.rst @@ -2,7 +2,7 @@ {{ header }} - .. _integer_na: +.. _integer_na: ************************** Nullable Integer Data Type diff --git a/doc/source/io.rst b/doc/source/io.rst index 3bbd4e8410fa5..967648f3a168a 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -4880,7 +4880,7 @@ below and the SQLAlchemy `documentation <https://docs.sqlalchemy.org/en/latest/c If you want to manage your own connections you can pass one of those instead: -.. ipython:: python +.. code-block:: python with engine.connect() as conn, conn.begin(): data = pd.read_sql_table('data', conn) diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst index a9234b83c78ab..a462f01dcd14f 100644 --- a/doc/source/missing_data.rst +++ b/doc/source/missing_data.rst @@ -759,12 +759,7 @@ the ``dtype="Int64"``. .. ipython:: python - s = pd.Series(np.random.randn(5), index=[0, 2, 4, 6, 7], - dtype="Int64") - s > 0 - (s > 0).dtype - crit = (s > 0).reindex(list(range(8))) - crit - crit.dtype + s = pd.Series([0, 1, np.nan, 3, 4], dtype="Int64") + s See :ref:`integer_na` for more. diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 59e8fa58fd9cf..84fca37318091 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1874,7 +1874,7 @@ has multiplied span. .. 
ipython:: python - pd.PeriodIndex(start='2014-01', freq='3M', periods=4) + pd.period_range(start='2014-01', freq='3M', periods=4) If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor endpoints for a ``PeriodIndex`` with frequency matching that of the @@ -1882,8 +1882,8 @@ endpoints for a ``PeriodIndex`` with frequency matching that of the .. ipython:: python - pd.PeriodIndex(start=pd.Period('2017Q1', freq='Q'), - end=pd.Period('2017Q2', freq='Q'), freq='M') + pd.period_range(start=pd.Period('2017Q1', freq='Q'), + end=pd.Period('2017Q2', freq='Q'), freq='M') Just like ``DatetimeIndex``, a ``PeriodIndex`` can also be used to index pandas objects: diff --git a/doc/source/tutorials.rst b/doc/source/tutorials.rst index f4f33a921dcce..8e23c643280c1 100644 --- a/doc/source/tutorials.rst +++ b/doc/source/tutorials.rst @@ -33,7 +33,7 @@ repository <http://github.com/jvns/pandas-cookbook>`_. Learn Pandas by Hernan Rojas ---------------------------- -A set of lesson for new pandas users: `Learn pandas <https://bitbucket.org/hrojas/learn-pandas>`__. +A set of lesson for new pandas users: https://bitbucket.org/hrojas/learn-pandas Practical data analysis with Python ----------------------------------- diff --git a/doc/source/whatsnew/v0.20.0.rst b/doc/source/whatsnew/v0.20.0.rst index c78d5c1d178d2..c720e075012eb 100644 --- a/doc/source/whatsnew/v0.20.0.rst +++ b/doc/source/whatsnew/v0.20.0.rst @@ -189,14 +189,15 @@ URLs and paths are now inferred using their file extensions. Additionally, support for bz2 compression in the python 2 C-engine improved (:issue:`14874`). .. 
ipython:: python - :okwarning: url = ('https://github.com/{repo}/raw/{branch}/{path}' .format(repo='pandas-dev/pandas', branch='master', path='pandas/tests/io/parser/data/salaries.csv.bz2')) - df = pd.read_table(url, compression='infer') # default, infer compression - df = pd.read_table(url, compression='bz2') # explicitly specify compression + # default, infer compression + df = pd.read_csv(url, sep='\t', compression='infer') + # explicitly specify compression + df = pd.read_csv(url, sep='\t', compression='bz2') df.head(2) .. _whatsnew_0200.enhancements.pickle_compression:
Some follow-ups on recent doc PRs
https://api.github.com/repos/pandas-dev/pandas/pulls/24552
2019-01-02T14:29:31Z
2019-01-02T17:03:17Z
2019-01-02T17:03:17Z
2019-01-02T17:07:46Z
TST: isort tests/test_*
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 8d7fd6449b354..294eae9d45bee 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1,28 +1,30 @@ # -*- coding: utf-8 -*- -import numpy as np -import pytest - -from numpy.random import RandomState -from numpy import nan from datetime import datetime from itertools import permutations import struct -from pandas import (Series, Categorical, CategoricalIndex, - Timestamp, DatetimeIndex, Index, IntervalIndex) -import pandas as pd -from pandas import compat -from pandas._libs import (groupby as libgroupby, algos as libalgos, - hashtable as ht) +import numpy as np +from numpy import nan +from numpy.random import RandomState +import pytest + +from pandas._libs import ( + algos as libalgos, groupby as libgroupby, hashtable as ht) from pandas.compat import lrange, range -from pandas.core.arrays import DatetimeArrayMixin as DatetimeArray +from pandas.compat.numpy import np_array_datetime64_compat +import pandas.util._test_decorators as td + +from pandas.core.dtypes.dtypes import CategoricalDtype as CDT + +import pandas as pd +from pandas import ( + Categorical, CategoricalIndex, DatetimeIndex, Index, IntervalIndex, Series, + Timestamp, compat) import pandas.core.algorithms as algos +from pandas.core.arrays import DatetimeArrayMixin as DatetimeArray import pandas.core.common as com import pandas.util.testing as tm -import pandas.util._test_decorators as td -from pandas.core.dtypes.dtypes import CategoricalDtype as CDT -from pandas.compat.numpy import np_array_datetime64_compat from pandas.util.testing import assert_almost_equal diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index f941f2ff32fa1..85650a9b0df0d 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -1,32 +1,33 @@ # -*- coding: utf-8 -*- from __future__ import print_function +from datetime import datetime, timedelta import re import sys -from datetime import datetime, timedelta 
-import pytest + import numpy as np +import pytest -import pandas as pd +from pandas._libs.tslib import iNaT import pandas.compat as compat -from pandas.core.dtypes.common import ( - is_object_dtype, is_datetime64_dtype, is_datetime64tz_dtype, - needs_i8_conversion, is_timedelta64_dtype) -import pandas.util.testing as tm -from pandas import (Series, Index, DatetimeIndex, TimedeltaIndex, - PeriodIndex, Timedelta, IntervalIndex, Interval, - CategoricalIndex, Timestamp, DataFrame, Panel) -from pandas.core.arrays import ( - PandasArray, - DatetimeArrayMixin as DatetimeArray, - TimedeltaArrayMixin as TimedeltaArray, -) -from pandas.compat import StringIO, PYPY, long +from pandas.compat import PYPY, StringIO, long from pandas.compat.numpy import np_array_datetime64_compat + +from pandas.core.dtypes.common import ( + is_datetime64_dtype, is_datetime64tz_dtype, is_object_dtype, + is_timedelta64_dtype, needs_i8_conversion) + +import pandas as pd +from pandas import ( + CategoricalIndex, DataFrame, DatetimeIndex, Index, Interval, IntervalIndex, + Panel, PeriodIndex, Series, Timedelta, TimedeltaIndex, Timestamp) from pandas.core.accessor import PandasDelegate -from pandas.core.base import PandasObject, NoNewAttributesMixin +from pandas.core.arrays import ( + DatetimeArrayMixin as DatetimeArray, PandasArray, + TimedeltaArrayMixin as TimedeltaArray) +from pandas.core.base import NoNewAttributesMixin, PandasObject from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin -from pandas._libs.tslib import iNaT +import pandas.util.testing as tm class CheckStringMixin(object): diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index ae46bee901ff2..18eb760e31db8 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -1,18 +1,15 @@ # -*- coding: utf-8 -*- import collections -import string from functools import partial +import string import numpy as np import pytest import pandas as pd from pandas import Series, Timestamp -from 
pandas.core import ( - common as com, - ops, -) +from pandas.core import common as com, ops def test_get_callable_name(): diff --git a/pandas/tests/test_compat.py b/pandas/tests/test_compat.py index 79d3aad493182..d1a3ee43a4623 100644 --- a/pandas/tests/test_compat.py +++ b/pandas/tests/test_compat.py @@ -3,11 +3,13 @@ Testing that functions from compat work as expected """ -import pytest import re -from pandas.compat import (range, zip, map, filter, lrange, lzip, lmap, - lfilter, builtins, iterkeys, itervalues, iteritems, - next, get_range_parameters, PY2, re_type) + +import pytest + +from pandas.compat import ( + PY2, builtins, filter, get_range_parameters, iteritems, iterkeys, + itervalues, lfilter, lmap, lrange, lzip, map, next, range, re_type, zip) class TestBuiltinIterators(object): diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py index fd8e98c483f78..2cdcb948eb917 100644 --- a/pandas/tests/test_config.py +++ b/pandas/tests/test_config.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- +import warnings + import pytest import pandas as pd -import warnings - class TestConfig(object): diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index 1d17b514a5b67..e22b9a0ef25e3 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -2,15 +2,17 @@ """ Testing that we work in the downstream packages """ +import importlib import subprocess import sys -import pytest import numpy as np # noqa -from pandas import DataFrame +import pytest + from pandas.compat import PY36 + +from pandas import DataFrame from pandas.util import testing as tm -import importlib def import_module(name): diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py index c5ea69b5ec46f..d3b6a237a97a1 100644 --- a/pandas/tests/test_errors.py +++ b/pandas/tests/test_errors.py @@ -1,10 +1,11 @@ # -*- coding: utf-8 -*- import pytest -import pandas # noqa -import pandas as pd + from pandas.errors import AbstractMethodError 
+import pandas as pd # noqa + @pytest.mark.parametrize( "exc", ['UnsupportedFunctionCall', 'UnsortedIndexError', diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index cc5ae9b15ba9e..f5aa0b0b3c9c8 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -1,23 +1,25 @@ # -*- coding: utf-8 -*- from __future__ import print_function -# pylint: disable-msg=W0612,E1101 -from warnings import catch_warnings, simplefilter -import re import operator -import pytest - -from numpy.random import randn +import re +from warnings import catch_warnings, simplefilter import numpy as np +from numpy.random import randn +import pytest +from pandas import _np_version_under1p13, compat from pandas.core.api import DataFrame, Panel from pandas.core.computation import expressions as expr -from pandas import compat, _np_version_under1p13 -from pandas.util.testing import (assert_almost_equal, assert_series_equal, - assert_frame_equal, assert_panel_equal) -from pandas.io.formats.printing import pprint_thing import pandas.util.testing as tm +from pandas.util.testing import ( + assert_almost_equal, assert_frame_equal, assert_panel_equal, + assert_series_equal) + +from pandas.io.formats.printing import pprint_thing + +# pylint: disable-msg=W0612,E1101 _frame = DataFrame(randn(10000, 4), columns=list('ABCD'), dtype='float64') diff --git a/pandas/tests/test_join.py b/pandas/tests/test_join.py index af946436b55c7..5b6656de15731 100644 --- a/pandas/tests/test_join.py +++ b/pandas/tests/test_join.py @@ -1,9 +1,10 @@ # -*- coding: utf-8 -*- import numpy as np -from pandas import Index, DataFrame, Categorical, merge from pandas._libs import join as _join + +from pandas import Categorical, DataFrame, Index, merge import pandas.util.testing as tm from pandas.util.testing import assert_almost_equal, assert_frame_equal diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py index d0812eae80f2d..c5dcfc89faa67 100644 --- 
a/pandas/tests/test_lib.py +++ b/pandas/tests/test_lib.py @@ -1,10 +1,11 @@ # -*- coding: utf-8 -*- +import numpy as np import pytest -import numpy as np -from pandas import Index from pandas._libs import lib, writers as libwriters + +from pandas import Index import pandas.util.testing as tm diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index ce95f0f86ef7b..b5023c376dedd 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -1,23 +1,23 @@ # -*- coding: utf-8 -*- # pylint: disable-msg=W0612,E1101,W0141 -from warnings import catch_warnings, simplefilter import datetime import itertools +from warnings import catch_warnings, simplefilter +import numpy as np +from numpy.random import randn import pytest import pytz -from numpy.random import randn -import numpy as np - -from pandas.core.index import Index, MultiIndex -from pandas import (Panel, DataFrame, Series, isna, Timestamp) +from pandas.compat import ( + StringIO, lrange, lzip, product as cart_product, range, u, zip) from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype -import pandas.util.testing as tm -from pandas.compat import (range, lrange, StringIO, lzip, u, product as - cart_product, zip) + import pandas as pd +from pandas import DataFrame, Panel, Series, Timestamp, isna +from pandas.core.index import Index, MultiIndex +import pandas.util.testing as tm AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew', 'mad', 'std', 'var', 'sem'] diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 1e08914811402..cc793767d3af6 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -1,20 +1,22 @@ # -*- coding: utf-8 -*- from __future__ import division, print_function -import warnings from functools import partial +import warnings import numpy as np import pytest -import pandas as pd -import pandas.core.nanops as nanops -import pandas.util._test_decorators as td -import 
pandas.util.testing as tm -from pandas import Series, isna from pandas.compat.numpy import _np_version_under1p13 +import pandas.util._test_decorators as td + from pandas.core.dtypes.common import is_integer_dtype + +import pandas as pd +from pandas import Series, isna from pandas.core.arrays import DatetimeArrayMixin as DatetimeArray +import pandas.core.nanops as nanops +import pandas.util.testing as tm use_bn = nanops._USE_BOTTLENECK diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 33f2c34400373..5539778e1d187 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -1,31 +1,31 @@ # -*- coding: utf-8 -*- # pylint: disable=W0612,E1101 -from warnings import catch_warnings, simplefilter from datetime import datetime import operator -import pytest +from warnings import catch_warnings, simplefilter import numpy as np +import pytest + +from pandas.compat import OrderedDict, StringIO, lrange, range, signature +import pandas.util._test_decorators as td from pandas.core.dtypes.common import is_float_dtype -from pandas import (Series, DataFrame, Index, date_range, isna, notna, - MultiIndex) + +from pandas import ( + DataFrame, Index, MultiIndex, Series, compat, date_range, isna, notna) from pandas.core.nanops import nanall, nanany +import pandas.core.panel as panelm from pandas.core.panel import Panel +import pandas.util.testing as tm +from pandas.util.testing import ( + assert_almost_equal, assert_frame_equal, assert_panel_equal, + assert_series_equal, ensure_clean, makeCustomDataframe as mkdf, + makeMixedDataFrame) from pandas.io.formats.printing import pprint_thing -from pandas import compat -from pandas.compat import range, lrange, StringIO, OrderedDict, signature - from pandas.tseries.offsets import BDay, MonthEnd -from pandas.util.testing import (assert_panel_equal, assert_frame_equal, - assert_series_equal, assert_almost_equal, - ensure_clean, makeMixedDataFrame, - makeCustomDataframe as mkdf) -import pandas.core.panel as 
panelm -import pandas.util.testing as tm -import pandas.util._test_decorators as td def make_test_panel(): diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index 333b93dbdf580..7500cbb3cfc3a 100644 --- a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -1,21 +1,19 @@ -import pytest -from itertools import product from collections import defaultdict -import warnings from datetime import datetime +from itertools import product +import warnings import numpy as np from numpy import nan +import pytest + +from pandas import DataFrame, MultiIndex, Series, compat, concat, merge from pandas.core import common as com -from pandas import DataFrame, MultiIndex, merge, concat, Series, compat +from pandas.core.sorting import ( + decons_group_index, get_group_index, is_int64_overflow_possible, + lexsort_indexer, nargsort, safe_sort) from pandas.util import testing as tm from pandas.util.testing import assert_frame_equal, assert_series_equal -from pandas.core.sorting import (is_int64_overflow_possible, - decons_group_index, - get_group_index, - nargsort, - lexsort_indexer, - safe_sort) class TestSorting(object): diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index c5a4e9511a6ef..d4ea21632edf9 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -2,21 +2,20 @@ # pylint: disable-msg=E1101,W0612 from datetime import datetime, timedelta -import pytest import re -from numpy import nan as NA import numpy as np +from numpy import nan as NA from numpy.random import randint +import pytest -from pandas.compat import range, u, PY3 import pandas.compat as compat -from pandas import Index, Series, DataFrame, isna, MultiIndex, notna, concat - -from pandas.util.testing import assert_series_equal, assert_index_equal -import pandas.util.testing as tm +from pandas.compat import PY3, range, u +from pandas import DataFrame, Index, MultiIndex, Series, concat, isna, notna import pandas.core.strings as strings 
+import pandas.util.testing as tm +from pandas.util.testing import assert_index_equal, assert_series_equal def assert_series_or_index_equal(left, right): diff --git a/pandas/tests/test_take.py b/pandas/tests/test_take.py index 69150ee3c5454..c9e4ed90b1dea 100644 --- a/pandas/tests/test_take.py +++ b/pandas/tests/test_take.py @@ -1,13 +1,15 @@ # -*- coding: utf-8 -*- -import re from datetime import datetime +import re import numpy as np import pytest + +from pandas._libs.tslib import iNaT from pandas.compat import long + import pandas.core.algorithms as algos import pandas.util.testing as tm -from pandas._libs.tslib import iNaT @pytest.fixture(params=[True, False]) diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index b53aca2c9852b..412f70a3cb516 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -1,24 +1,26 @@ from collections import OrderedDict +from datetime import datetime, timedelta from itertools import product -import pytest import warnings from warnings import catch_warnings -from datetime import datetime, timedelta -from numpy.random import randn import numpy as np +from numpy.random import randn +import pytest + +from pandas.compat import range, zip +from pandas.errors import UnsupportedFunctionCall +import pandas.util._test_decorators as td import pandas as pd -from pandas import (Series, DataFrame, bdate_range, - isna, notna, concat, Timestamp, Index) -import pandas.core.window as rwindow -import pandas.tseries.offsets as offsets +from pandas import ( + DataFrame, Index, Series, Timestamp, bdate_range, concat, isna, notna) from pandas.core.base import SpecificationError -from pandas.errors import UnsupportedFunctionCall from pandas.core.sorting import safe_sort +import pandas.core.window as rwindow import pandas.util.testing as tm -import pandas.util._test_decorators as td -from pandas.compat import range, zip + +import pandas.tseries.offsets as offsets N, K = 100, 10 diff --git a/setup.cfg b/setup.cfg 
index 59e5991914ca6..26d08ba604c97 100644 --- a/setup.cfg +++ b/setup.cfg @@ -118,24 +118,6 @@ force_sort_within_sections=True skip= pandas/core/api.py, pandas/core/frame.py, - pandas/tests/test_errors.py, - pandas/tests/test_base.py, - pandas/tests/test_register_accessor.py, - pandas/tests/test_window.py, - pandas/tests/test_downstream.py, - pandas/tests/test_multilevel.py, - pandas/tests/test_common.py, - pandas/tests/test_compat.py, - pandas/tests/test_sorting.py, - pandas/tests/test_algos.py, - pandas/tests/test_expressions.py, - pandas/tests/test_strings.py, - pandas/tests/test_lib.py, - pandas/tests/test_join.py, - pandas/tests/test_panel.py, - pandas/tests/test_take.py, - pandas/tests/test_nanops.py, - pandas/tests/test_config.py, pandas/tests/api/test_types.py, pandas/tests/api/test_api.py, pandas/tests/tools/test_numeric.py,
xref #23334
https://api.github.com/repos/pandas-dev/pandas/pulls/24551
2019-01-02T14:24:47Z
2019-01-02T16:39:12Z
2019-01-02T16:39:12Z
2019-01-02T17:04:31Z
TST: isort tests/reshape
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index 083ce16ef9296..8ee1e49f01ac1 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -1,20 +1,20 @@ # pylint: disable=E1103 from warnings import catch_warnings -from numpy.random import randn + import numpy as np +from numpy.random import randn import pytest -import pandas as pd -from pandas.compat import lrange +from pandas._libs import join as libjoin import pandas.compat as compat -from pandas.util.testing import assert_frame_equal -from pandas import DataFrame, MultiIndex, Series, Index, merge, concat +from pandas.compat import lrange -from pandas._libs import join as libjoin +import pandas as pd +from pandas import DataFrame, Index, MultiIndex, Series, concat, merge +from pandas.tests.reshape.merge.test_merge import NGROUPS, N, get_test_data import pandas.util.testing as tm -from pandas.tests.reshape.merge.test_merge import get_test_data, N, NGROUPS - +from pandas.util.testing import assert_frame_equal a_ = np.array diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 970802e94662a..f6882e9bc8394 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -1,25 +1,27 @@ # pylint: disable=E1103 -import random -import re from collections import OrderedDict from datetime import date, datetime +import random +import re import numpy as np -import pytest from numpy import nan +import pytest -import pandas as pd -import pandas.util.testing as tm -from pandas import (Categorical, CategoricalIndex, DataFrame, DatetimeIndex, - Float64Index, Int64Index, MultiIndex, RangeIndex, - Series, UInt64Index) -from pandas.api.types import CategoricalDtype as CDT from pandas.compat import lrange + from pandas.core.dtypes.common import is_categorical_dtype, is_object_dtype from pandas.core.dtypes.dtypes import CategoricalDtype + +import pandas as 
pd +from pandas import ( + Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Float64Index, + Int64Index, MultiIndex, RangeIndex, Series, UInt64Index) +from pandas.api.types import CategoricalDtype as CDT from pandas.core.reshape.concat import concat from pandas.core.reshape.merge import MergeError, merge +import pandas.util.testing as tm from pandas.util.testing import assert_frame_equal, assert_series_equal N = 50 diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index 3035412d7b836..1483654daa99e 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -1,10 +1,9 @@ +import numpy as np import pytest - import pytz -import numpy as np + import pandas as pd -from pandas import (merge_asof, read_csv, - to_datetime, Timedelta) +from pandas import Timedelta, merge_asof, read_csv, to_datetime from pandas.core.reshape.merge import MergeError from pandas.util.testing import assert_frame_equal diff --git a/pandas/tests/reshape/merge/test_merge_ordered.py b/pandas/tests/reshape/merge/test_merge_ordered.py index 0f8ecc6370bfd..414f46cdb296c 100644 --- a/pandas/tests/reshape/merge/test_merge_ordered.py +++ b/pandas/tests/reshape/merge/test_merge_ordered.py @@ -1,10 +1,10 @@ +from numpy import nan import pytest + import pandas as pd from pandas import DataFrame, merge_ordered from pandas.util.testing import assert_frame_equal -from numpy import nan - class TestMergeOrdered(object): diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 0706cb12ac5d0..051462c5e9fc6 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1,27 +1,26 @@ -from warnings import catch_warnings, simplefilter -from itertools import combinations from collections import deque +import datetime as dt +from datetime import datetime from decimal import Decimal +from itertools import combinations +from warnings 
import catch_warnings, simplefilter -import datetime as dt import dateutil import numpy as np from numpy.random import randn +import pytest + +from pandas.compat import PY2, Iterable, StringIO, iteritems -from datetime import datetime -from pandas.compat import Iterable, StringIO, iteritems, PY2 -import pandas as pd -from pandas import (DataFrame, concat, - read_csv, isna, Series, date_range, - Index, Panel, MultiIndex, Timestamp, - DatetimeIndex, Categorical) from pandas.core.dtypes.dtypes import CategoricalDtype -from pandas.util import testing as tm -from pandas.util.testing import (assert_frame_equal, - makeCustomDataframe as mkdf) -from pandas.tests.extension.decimal import to_decimal -import pytest +import pandas as pd +from pandas import ( + Categorical, DataFrame, DatetimeIndex, Index, MultiIndex, Panel, Series, + Timestamp, concat, date_range, isna, read_csv) +from pandas.tests.extension.decimal import to_decimal +from pandas.util import testing as tm +from pandas.util.testing import assert_frame_equal, makeCustomDataframe as mkdf @pytest.fixture(params=[True, False]) diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index 8fd3ae8bb387b..6b633d7e77f52 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -1,17 +1,15 @@ # -*- coding: utf-8 -*- # pylint: disable-msg=W0612,E1101 +import numpy as np +from numpy import nan import pytest -from pandas import DataFrame -import pandas as pd - -from numpy import nan -import numpy as np +from pandas.compat import range -from pandas import melt, lreshape, wide_to_long +import pandas as pd +from pandas import DataFrame, lreshape, melt, wide_to_long import pandas.util.testing as tm -from pandas.compat import range class TestMelt(object): diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index a2b5eacd873bb..f0d1ad57ba829 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -1,20 
+1,20 @@ # -*- coding: utf-8 -*- -from datetime import datetime, date, timedelta +from collections import OrderedDict +from datetime import date, datetime, timedelta +import numpy as np import pytest +from pandas.compat import product, range -import numpy as np - -from collections import OrderedDict import pandas as pd -from pandas import (DataFrame, Series, Index, MultiIndex, - Grouper, date_range, concat, Categorical) -from pandas.core.reshape.pivot import pivot_table, crosstab -from pandas.compat import range, product -import pandas.util.testing as tm +from pandas import ( + Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat, + date_range) from pandas.api.types import CategoricalDtype as CDT +from pandas.core.reshape.pivot import crosstab, pivot_table +import pandas.util.testing as tm @pytest.fixture(params=[True, False]) diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py index edbe70d308b96..7b544b7981c1f 100644 --- a/pandas/tests/reshape/test_reshape.py +++ b/pandas/tests/reshape/test_reshape.py @@ -1,22 +1,21 @@ # -*- coding: utf-8 -*- # pylint: disable-msg=W0612,E1101 -import pytest from collections import OrderedDict -from pandas import DataFrame, Series -from pandas.core.dtypes.common import is_integer_dtype -from pandas.core.sparse.api import SparseDtype, SparseArray -import pandas as pd - -from numpy import nan import numpy as np +from numpy import nan +import pytest -from pandas.util.testing import assert_frame_equal +from pandas.compat import u + +from pandas.core.dtypes.common import is_integer_dtype -from pandas import get_dummies, Categorical, Index +import pandas as pd +from pandas import Categorical, DataFrame, Index, Series, get_dummies +from pandas.core.sparse.api import SparseArray, SparseDtype import pandas.util.testing as tm -from pandas.compat import u +from pandas.util.testing import assert_frame_equal class TestGetDummies(object): diff --git 
a/pandas/tests/reshape/test_union_categoricals.py b/pandas/tests/reshape/test_union_categoricals.py index 80538b0c6de4e..9b2b8bf9ed49f 100644 --- a/pandas/tests/reshape/test_union_categoricals.py +++ b/pandas/tests/reshape/test_union_categoricals.py @@ -1,9 +1,10 @@ +import numpy as np import pytest -import numpy as np -import pandas as pd -from pandas import Categorical, Series, CategoricalIndex from pandas.core.dtypes.concat import union_categoricals + +import pandas as pd +from pandas import Categorical, CategoricalIndex, Series from pandas.util import testing as tm diff --git a/pandas/tests/reshape/test_util.py b/pandas/tests/reshape/test_util.py index e7e1626bdb2da..a8d9e7a775442 100644 --- a/pandas/tests/reshape/test_util.py +++ b/pandas/tests/reshape/test_util.py @@ -1,8 +1,9 @@ -import pytest import numpy as np -from pandas import date_range, Index -import pandas.util.testing as tm +import pytest + +from pandas import Index, date_range from pandas.core.reshape.util import cartesian_product +import pandas.util.testing as tm class TestCartesianProduct(object): diff --git a/setup.cfg b/setup.cfg index 59e5991914ca6..15a5384bc632c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -182,18 +182,6 @@ skip= pandas/tests/plotting/common.py, pandas/tests/plotting/test_boxplot_method.py, pandas/tests/plotting/test_deprecated.py, - pandas/tests/reshape/test_concat.py, - pandas/tests/reshape/test_util.py, - pandas/tests/reshape/test_reshape.py, - pandas/tests/reshape/test_tile.py, - pandas/tests/reshape/test_pivot.py, - pandas/tests/reshape/test_melt.py, - pandas/tests/reshape/test_union_categoricals.py, - pandas/tests/reshape/merge/test_merge_index_as_string.py, - pandas/tests/reshape/merge/test_merge.py, - pandas/tests/reshape/merge/test_merge_asof.py, - pandas/tests/reshape/merge/test_join.py, - pandas/tests/reshape/merge/test_merge_ordered.py, pandas/tests/sparse/test_indexing.py, pandas/tests/extension/test_sparse.py, pandas/tests/extension/base/reduce.py,
xref #23334
https://api.github.com/repos/pandas-dev/pandas/pulls/24550
2019-01-02T14:18:57Z
2019-01-02T15:16:33Z
2019-01-02T15:16:33Z
2019-01-02T15:20:52Z
DOC: exclude autogenerated c/cpp/html files from 'trailing whitespace' checks
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index d16249724127f..87a1c8ae33489 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -93,7 +93,7 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then # this particular codebase (e.g. src/headers, src/klib, src/msgpack). However, # we can lint all header files since they aren't "generated" like C files are. MSG='Linting .c and .h' ; echo $MSG - cpplint --quiet --extensions=c,h --headers=h --recursive --filter=-readability/casting,-runtime/int,-build/include_subdir pandas/_libs/src/*.h pandas/_libs/src/parser pandas/_libs/ujson pandas/_libs/tslibs/src/datetime + cpplint --quiet --extensions=c,h --headers=h --recursive --filter=-readability/casting,-runtime/int,-build/include_subdir pandas/_libs/src/*.h pandas/_libs/src/parser pandas/_libs/ujson pandas/_libs/tslibs/src/datetime pandas/io/msgpack pandas/_libs/*.cpp pandas/util RET=$(($RET + $?)) ; echo $MSG "DONE" echo "isort --version-number" @@ -174,9 +174,10 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then MSG='Check that no file in the repo contains tailing whitespaces' ; echo $MSG set -o pipefail if [[ "$AZURE" == "true" ]]; then - ! grep -n --exclude="*.svg" -RI "\s$" * | awk -F ":" '{print "##vso[task.logissue type=error;sourcepath=" $1 ";linenumber=" $2 ";] Tailing whitespaces found: " $3}' + # we exclude all c/cpp files as the c/cpp files of pandas code base are tested when Linting .c and .h files + ! grep -n '--exclude=*.'{svg,c,cpp,html} -RI "\s$" * | awk -F ":" '{print "##vso[task.logissue type=error;sourcepath=" $1 ";linenumber=" $2 ";] Tailing whitespaces found: " $3}' else - ! grep -n --exclude="*.svg" -RI "\s$" * | awk -F ":" '{print $1 ":" $2 ":Tailing whitespaces found: " $3}' + ! 
grep -n '--exclude=*.'{svg,c,cpp,html} -RI "\s$" * | awk -F ":" '{print $1 ":" $2 ":Tailing whitespaces found: " $3}' fi RET=$(($RET + $?)) ; echo $MSG "DONE" fi diff --git a/pandas/util/move.c b/pandas/util/move.c index 62860adb1c1f6..9bb662d50cb3f 100644 --- a/pandas/util/move.c +++ b/pandas/util/move.c @@ -1,3 +1,12 @@ +/* +Copyright (c) 2019, PyData Development Team +All rights reserved. + +Distributed under the terms of the BSD Simplified License. + +The full license is in the LICENSE file, distributed with this software. +*/ + #include <Python.h> #define COMPILING_IN_PY2 (PY_VERSION_HEX <= 0x03000000) @@ -31,15 +40,13 @@ typedef struct { static PyTypeObject stolenbuf_type; /* forward declare type */ static void -stolenbuf_dealloc(stolenbufobject *self) -{ +stolenbuf_dealloc(stolenbufobject *self) { Py_DECREF(self->invalid_bytes); PyObject_Del(self); } static int -stolenbuf_getbuffer(stolenbufobject *self, Py_buffer *view, int flags) -{ +stolenbuf_getbuffer(stolenbufobject *self, Py_buffer *view, int flags) { return PyBuffer_FillInfo(view, (PyObject*) self, (void*) PyString_AS_STRING(self->invalid_bytes), @@ -51,8 +58,8 @@ stolenbuf_getbuffer(stolenbufobject *self, Py_buffer *view, int flags) #if COMPILING_IN_PY2 static Py_ssize_t -stolenbuf_getreadwritebuf(stolenbufobject *self, Py_ssize_t segment, void **out) -{ +stolenbuf_getreadwritebuf(stolenbufobject *self, + Py_ssize_t segment, void **out) { if (segment != 0) { PyErr_SetString(PyExc_SystemError, "accessing non-existent string segment"); @@ -63,8 +70,7 @@ stolenbuf_getreadwritebuf(stolenbufobject *self, Py_ssize_t segment, void **out) } static Py_ssize_t -stolenbuf_getsegcount(stolenbufobject *self, Py_ssize_t *len) -{ +stolenbuf_getsegcount(stolenbufobject *self, Py_ssize_t *len) { if (len) { *len = PyString_GET_SIZE(self->invalid_bytes); } @@ -157,8 +163,7 @@ PyDoc_STRVAR( however, if called through *unpacking like ``stolenbuf(*(a,))`` it would only have the one reference (the tuple). 
*/ static PyObject* -move_into_mutable_buffer(PyObject *self, PyObject *bytes_rvalue) -{ +move_into_mutable_buffer(PyObject *self, PyObject *bytes_rvalue) { stolenbufobject *ret; if (!PyString_CheckExact(bytes_rvalue)) {
- [x] closes #24526 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24549
2019-01-02T13:39:49Z
2019-02-08T02:51:21Z
2019-02-08T02:51:21Z
2019-02-08T15:00:36Z
BUG: (row) Index Name with to_html(header=False) is not displayed
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 302f2bd05ee5c..43229f3f674d0 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1600,6 +1600,7 @@ Notice how we now instead output ``np.nan`` itself instead of a stringified form - Bug in :func:`to_html()` with ``index=False`` misses truncation indicators (...) on truncated DataFrame (:issue:`15019`, :issue:`22783`) - Bug in :func:`to_html()` with ``index=False`` when both columns and row index are ``MultiIndex`` (:issue:`22579`) - Bug in :func:`to_html()` with ``index_names=False`` displaying index name (:issue:`22747`) +- Bug in :func:`to_html()` with ``header=False`` not displaying row index names (:issue:`23788`) - Bug in :func:`DataFrame.to_string()` that broke column alignment when ``index=False`` and width of first column's values is greater than the width of first column's header (:issue:`16839`, :issue:`13032`) - Bug in :func:`DataFrame.to_string()` that caused representations of :class:`DataFrame` to not take up the whole window (:issue:`22984`) - Bug in :func:`DataFrame.to_csv` where a single level MultiIndex incorrectly wrote a tuple. Now just the value of the index is written (:issue:`19589`). 
diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index eb11dd461927b..58f5364f2b523 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -43,6 +43,12 @@ def __init__(self, formatter, classes=None, notebook=False, border=None, self.table_id = table_id self.render_links = render_links + @property + def show_row_idx_names(self): + return all((self.fmt.has_index_names, + self.fmt.index, + self.fmt.show_index_names)) + @property def show_col_idx_names(self): # see gh-22579 @@ -165,9 +171,7 @@ def write_style(self): element_props.append(('thead tr th', 'text-align', 'left')) - if all((self.fmt.has_index_names, - self.fmt.index, - self.fmt.show_index_names)): + if self.show_row_idx_names: element_props.append(('thead tr:last-of-type th', 'text-align', 'right')) @@ -228,17 +232,8 @@ def write_result(self, buf): buffer_put_lines(buf, self.elements) - def _write_header(self, indent): + def _write_col_header(self, indent): truncate_h = self.fmt.truncate_h - - if not self.fmt.header: - # write nothing - return indent - - self.write('<thead>', indent) - - indent += self.indent_delta - if isinstance(self.columns, ABCMultiIndex): template = 'colspan="{span:d}" halign="left"' @@ -357,12 +352,25 @@ def _write_header(self, indent): self.write_tr(row, indent, self.indent_delta, header=True, align=align) - if all((self.fmt.has_index_names, - self.fmt.index, - self.fmt.show_index_names)): - row = ([x if x is not None else '' for x in self.frame.index.names] - + [''] * (self.ncols + (1 if truncate_h else 0))) - self.write_tr(row, indent, self.indent_delta, header=True) + def _write_row_header(self, indent): + truncate_h = self.fmt.truncate_h + row = ([x if x is not None else '' for x in self.frame.index.names] + + [''] * (self.ncols + (1 if truncate_h else 0))) + self.write_tr(row, indent, self.indent_delta, header=True) + + def _write_header(self, indent): + if not (self.fmt.header or self.show_row_idx_names): + # write nothing + return indent + + 
self.write('<thead>', indent) + indent += self.indent_delta + + if self.fmt.header: + self._write_col_header(indent) + + if self.show_row_idx_names: + self._write_row_header(indent) indent -= self.indent_delta self.write('</thead>', indent) diff --git a/pandas/tests/io/formats/data/html/index_named_multi_columns_none.html b/pandas/tests/io/formats/data/html/index_named_multi_columns_none.html new file mode 100644 index 0000000000000..8c41d2e29f2c0 --- /dev/null +++ b/pandas/tests/io/formats/data/html/index_named_multi_columns_none.html @@ -0,0 +1,23 @@ +<table border="1" class="dataframe"> + <thead> + <tr> + <th>index.name.0</th> + <th>index.name.1</th> + <th></th> + <th></th> + </tr> + </thead> + <tbody> + <tr> + <th rowspan="2" valign="top">a</th> + <th>b</th> + <td>0</td> + <td>0</td> + </tr> + <tr> + <th>c</th> + <td>0</td> + <td>0</td> + </tr> + </tbody> +</table> diff --git a/pandas/tests/io/formats/data/html/index_named_standard_columns_none.html b/pandas/tests/io/formats/data/html/index_named_standard_columns_none.html new file mode 100644 index 0000000000000..432d8e06d5784 --- /dev/null +++ b/pandas/tests/io/formats/data/html/index_named_standard_columns_none.html @@ -0,0 +1,21 @@ +<table border="1" class="dataframe"> + <thead> + <tr> + <th>index.name</th> + <th></th> + <th></th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>0</td> + <td>0</td> + </tr> + <tr> + <th>1</th> + <td>0</td> + <td>0</td> + </tr> + </tbody> +</table> diff --git a/pandas/tests/io/formats/data/html/index_unnamed_multi_columns_none.html b/pandas/tests/io/formats/data/html/index_unnamed_multi_columns_none.html new file mode 100644 index 0000000000000..81da7c3619abc --- /dev/null +++ b/pandas/tests/io/formats/data/html/index_unnamed_multi_columns_none.html @@ -0,0 +1,15 @@ +<table border="1" class="dataframe"> + <tbody> + <tr> + <th rowspan="2" valign="top">a</th> + <th>b</th> + <td>0</td> + <td>0</td> + </tr> + <tr> + <th>c</th> + <td>0</td> + <td>0</td> + </tr> + </tbody> 
+</table> diff --git a/pandas/tests/io/formats/data/html/index_unnamed_standard_columns_none.html b/pandas/tests/io/formats/data/html/index_unnamed_standard_columns_none.html new file mode 100644 index 0000000000000..3d958afe4a4ac --- /dev/null +++ b/pandas/tests/io/formats/data/html/index_unnamed_standard_columns_none.html @@ -0,0 +1,14 @@ +<table border="1" class="dataframe"> + <tbody> + <tr> + <th>0</th> + <td>0</td> + <td>0</td> + </tr> + <tr> + <th>1</th> + <td>0</td> + <td>0</td> + </tr> + </tbody> +</table> diff --git a/pandas/tests/io/formats/data/html/trunc_df_index_named_multi_columns_none.html b/pandas/tests/io/formats/data/html/trunc_df_index_named_multi_columns_none.html new file mode 100644 index 0000000000000..0f262495b6c6b --- /dev/null +++ b/pandas/tests/io/formats/data/html/trunc_df_index_named_multi_columns_none.html @@ -0,0 +1,62 @@ +<table border="1" class="dataframe"> + <thead> + <tr> + <th>foo</th> + <th></th> + <th>baz</th> + <th></th> + <th></th> + <th></th> + <th></th> + <th></th> + </tr> + </thead> + <tbody> + <tr> + <th rowspan="2" valign="top">a</th> + <th rowspan="2" valign="top">c</th> + <th>e</th> + <td>0</td> + <td>1</td> + <td>...</td> + <td>6</td> + <td>7</td> + </tr> + <tr> + <th>f</th> + <td>8</td> + <td>9</td> + <td>...</td> + <td>14</td> + <td>15</td> + </tr> + <tr> + <th>...</th> + <th>...</th> + <th>...</th> + <td>...</td> + <td>...</td> + <td>...</td> + <td>...</td> + <td>...</td> + </tr> + <tr> + <th rowspan="2" valign="top">b</th> + <th rowspan="2" valign="top">d</th> + <th>e</th> + <td>48</td> + <td>49</td> + <td>...</td> + <td>54</td> + <td>55</td> + </tr> + <tr> + <th>f</th> + <td>56</td> + <td>57</td> + <td>...</td> + <td>62</td> + <td>63</td> + </tr> + </tbody> +</table> diff --git a/pandas/tests/io/formats/data/html/trunc_df_index_named_standard_columns_none.html b/pandas/tests/io/formats/data/html/trunc_df_index_named_standard_columns_none.html new file mode 100644 index 0000000000000..d294a507dbce4 --- /dev/null 
+++ b/pandas/tests/io/formats/data/html/trunc_df_index_named_standard_columns_none.html @@ -0,0 +1,54 @@ +<table border="1" class="dataframe"> + <thead> + <tr> + <th>index.name</th> + <th></th> + <th></th> + <th></th> + <th></th> + <th></th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>0</td> + <td>1</td> + <td>...</td> + <td>6</td> + <td>7</td> + </tr> + <tr> + <th>1</th> + <td>8</td> + <td>9</td> + <td>...</td> + <td>14</td> + <td>15</td> + </tr> + <tr> + <th>...</th> + <td>...</td> + <td>...</td> + <td>...</td> + <td>...</td> + <td>...</td> + </tr> + <tr> + <th>6</th> + <td>48</td> + <td>49</td> + <td>...</td> + <td>54</td> + <td>55</td> + </tr> + <tr> + <th>7</th> + <td>56</td> + <td>57</td> + <td>...</td> + <td>62</td> + <td>63</td> + </tr> + </tbody> +</table> diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py index 213eb0d5b5cb8..d333330c19e39 100644 --- a/pandas/tests/io/formats/test_to_html.py +++ b/pandas/tests/io/formats/test_to_html.py @@ -429,6 +429,7 @@ def test_to_html_multi_indexes_index_false(self, datapath): assert result == expected @pytest.mark.parametrize('index_names', [True, False]) + @pytest.mark.parametrize('header', [True, False]) @pytest.mark.parametrize('index', [True, False]) @pytest.mark.parametrize('column_index, column_type', [ (Index([0, 1]), 'unnamed_standard'), @@ -448,18 +449,21 @@ def test_to_html_multi_indexes_index_false(self, datapath): ]) def test_to_html_basic_alignment( self, datapath, row_index, row_type, column_index, column_type, - index, index_names): + index, header, index_names): # GH 22747, GH 22579 df = DataFrame(np.zeros((2, 2), dtype=int), index=row_index, columns=column_index) - result = df.to_html(index=index, index_names=index_names) + result = df.to_html( + index=index, header=header, index_names=index_names) if not index: row_type = 'none' elif not index_names and row_type.startswith('named'): row_type = 'un' + row_type - if not index_names and 
column_type.startswith('named'): + if not header: + column_type = 'none' + elif not index_names and column_type.startswith('named'): column_type = 'un' + column_type filename = 'index_' + row_type + '_columns_' + column_type @@ -467,6 +471,7 @@ def test_to_html_basic_alignment( assert result == expected @pytest.mark.parametrize('index_names', [True, False]) + @pytest.mark.parametrize('header', [True, False]) @pytest.mark.parametrize('index', [True, False]) @pytest.mark.parametrize('column_index, column_type', [ (Index(np.arange(8)), 'unnamed_standard'), @@ -488,19 +493,22 @@ def test_to_html_basic_alignment( ]) def test_to_html_alignment_with_truncation( self, datapath, row_index, row_type, column_index, column_type, - index, index_names): + index, header, index_names): # GH 22747, GH 22579 df = DataFrame(np.arange(64).reshape(8, 8), index=row_index, columns=column_index) - result = df.to_html(max_rows=4, max_cols=4, - index=index, index_names=index_names) + result = df.to_html( + max_rows=4, max_cols=4, + index=index, header=header, index_names=index_names) if not index: row_type = 'none' elif not index_names and row_type.startswith('named'): row_type = 'un' + row_type - if not index_names and column_type.startswith('named'): + if not header: + column_type = 'none' + elif not index_names and column_type.startswith('named'): column_type = 'un' + column_type filename = 'trunc_df_index_' + row_type + '_columns_' + column_type
- [x] closes #23788 - [x] xref #24546 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24547
2019-01-02T11:58:05Z
2019-01-02T14:18:30Z
2019-01-02T14:18:30Z
2019-01-02T14:45:22Z
TST: move test_non_reducing_slice_on_multiindex
diff --git a/pandas/tests/indexing/multiindex/test_slice.py b/pandas/tests/indexing/multiindex/test_slice.py index 596fe5d564a40..fcecb2b454eb6 100644 --- a/pandas/tests/indexing/multiindex/test_slice.py +++ b/pandas/tests/indexing/multiindex/test_slice.py @@ -7,6 +7,7 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series, Timestamp +from pandas.core.indexing import _non_reducing_slice from pandas.tests.indexing.common import _mklbl from pandas.util import testing as tm @@ -556,3 +557,20 @@ def test_int_series_slicing( result = ymd[5:] expected = ymd.reindex(s.index[5:]) tm.assert_frame_equal(result, expected) + + def test_non_reducing_slice_on_multiindex(self): + # GH 19861 + dic = { + ('a', 'd'): [1, 4], + ('a', 'c'): [2, 3], + ('b', 'c'): [3, 2], + ('b', 'd'): [4, 1] + } + df = pd.DataFrame(dic, index=[0, 1]) + idx = pd.IndexSlice + slice_ = idx[:, idx['b', 'd']] + tslice_ = _non_reducing_slice(slice_) + + result = df.loc[tslice_] + expected = pd.DataFrame({('b', 'd'): [4, 1]}) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 2224c3ab9935a..03f1975c50d2a 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -812,23 +812,6 @@ def test_non_reducing_slice(self): tslice_ = _non_reducing_slice(slice_) assert isinstance(df.loc[tslice_], DataFrame) - def test_non_reducing_slice_on_multiindex(self): - # GH 19861 - dic = { - ('a', 'd'): [1, 4], - ('a', 'c'): [2, 3], - ('b', 'c'): [3, 2], - ('b', 'd'): [4, 1] - } - df = pd.DataFrame(dic, index=[0, 1]) - idx = pd.IndexSlice - slice_ = idx[:, idx['b', 'd']] - tslice_ = _non_reducing_slice(slice_) - - result = df.loc[tslice_] - expected = pd.DataFrame({('b', 'd'): [4, 1]}) - tm.assert_frame_equal(result, expected) - def test_list_slice(self): # like dataframe getitem slices = [['A'], Series(['A']), np.array(['A'])]
Follow-up to #19881
https://api.github.com/repos/pandas-dev/pandas/pulls/24545
2019-01-02T09:23:52Z
2019-01-02T12:21:10Z
2019-01-02T12:21:10Z
2019-01-02T13:48:12Z
MAINT: Remove empty Python file
diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py deleted file mode 100644 index e69de29bb2d1d..0000000000000
Follow-up to #23255.
https://api.github.com/repos/pandas-dev/pandas/pulls/24544
2019-01-02T05:47:10Z
2019-01-02T06:25:35Z
2019-01-02T06:25:35Z
2019-01-02T06:27:17Z
diff reduction for 24024
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 3f32b7b7dcea9..8b0565a36648f 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -19,7 +19,7 @@ is_extension_type, is_float_dtype, is_int64_dtype, is_object_dtype, is_period_dtype, is_string_dtype, is_timedelta64_dtype, pandas_dtype) from pandas.core.dtypes.dtypes import DatetimeTZDtype -from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries +from pandas.core.dtypes.generic import ABCIndexClass, ABCPandasArray, ABCSeries from pandas.core.dtypes.missing import isna from pandas.core import ops @@ -224,7 +224,7 @@ def _simple_new(cls, values, freq=None, tz=None): # for compat with datetime/timedelta/period shared methods, # we can sometimes get here with int64 values. These represent # nanosecond UTC (or tz-naive) unix timestamps - values = values.view('M8[ns]') + values = values.view(_NS_DTYPE) assert values.dtype == 'M8[ns]', values.dtype @@ -417,7 +417,7 @@ def tz(self): Returns None when the array is tz-naive. """ # GH 18595 - return getattr(self._dtype, "tz", None) + return getattr(self.dtype, "tz", None) @tz.setter def tz(self, value): @@ -517,10 +517,6 @@ def astype(self, dtype, copy=True): # ---------------------------------------------------------------- # ExtensionArray Interface - @property - def _ndarray_values(self): - return self._data - @Appender(dtl.DatetimeLikeArrayMixin._validate_fill_value.__doc__) def _validate_fill_value(self, fill_value): if isna(fill_value): @@ -1568,6 +1564,8 @@ def sequence_to_dt64ns(data, dtype=None, copy=False, copy = False elif isinstance(data, ABCSeries): data = data._values + if isinstance(data, ABCPandasArray): + data = data.to_numpy() if hasattr(data, "freq"): # i.e. 
DatetimeArray/Index diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 7199d88d4bde5..45a6081093aed 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -269,11 +269,6 @@ def _check_compatible_with(self, other): def dtype(self): return self._dtype - @property - def _ndarray_values(self): - # Ordinals - return self._data - @property def freq(self): """ @@ -475,7 +470,6 @@ def _format_native_types(self, na_rep=u'NaT', date_format=None, **kwargs): """ actually format my specific types """ - # TODO(DatetimeArray): remove values = self.astype(object) if date_format: diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 719a79cf300a0..78570be8dc07f 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -369,8 +369,9 @@ def _addsub_offset_array(self, other, op): # TimedeltaIndex can only operate with a subset of DateOffset # subclasses. Incompatible classes will raise AttributeError, # which we re-raise as TypeError - return dtl.DatetimeLikeArrayMixin._addsub_offset_array(self, other, - op) + return super(TimedeltaArrayMixin, self)._addsub_offset_array( + other, op + ) except AttributeError: raise TypeError("Cannot add/subtract non-tick DateOffset to {cls}" .format(cls=type(self).__name__)) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 25cd5cda9989c..50b2413167b32 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -55,6 +55,7 @@ class DatetimeIndexOpsMixin(ExtensionOpsMixin): """ common ops mixin to support a unified interface datetimelike Index """ + _data = None # type: DatetimeLikeArrayMixin # DatetimeLikeArrayMixin assumes subclasses are mutable, so these are # properties there. 
They can be made into cache_readonly for Index @@ -72,6 +73,9 @@ class DatetimeIndexOpsMixin(ExtensionOpsMixin): @property def freq(self): + """ + Return the frequency object if it is set, otherwise None. + """ return self._eadata.freq @freq.setter @@ -81,6 +85,9 @@ def freq(self, value): @property def freqstr(self): + """ + Return the frequency object as a string if it is set, otherwise None. + """ return self._eadata.freqstr def unique(self, level=None): @@ -111,6 +118,20 @@ def wrapper(self, other): def _ndarray_values(self): return self._eadata._ndarray_values + # ------------------------------------------------------------------------ + # Abstract data attributes + + @property + def values(self): + # type: () -> np.ndarray + # Note: PeriodArray overrides this to return an ndarray of objects. + return self._eadata._data + + @property + @Appender(DatetimeLikeArrayMixin.asi8.__doc__) + def asi8(self): + return self._eadata.asi8 + # ------------------------------------------------------------------------ def equals(self, other): diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 5695d3d9e67f3..690a3db28fe83 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -330,7 +330,6 @@ def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None): result._eadata = dtarr result.name = name # For groupby perf. 
See note in indexes/base about _index_data - # TODO: make sure this is updated correctly if edited result._index_data = result._data result._reset_identity() return result diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index a915f24e3c87f..4bd8f7407500b 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -18,7 +18,6 @@ from pandas.core import common as com from pandas.core.accessor import delegate_names from pandas.core.algorithms import unique1d -from pandas.core.arrays.datetimelike import DatelikeOps from pandas.core.arrays.period import ( PeriodArray, period_array, validate_dtype_freq) from pandas.core.base import _shared_docs @@ -70,9 +69,9 @@ class PeriodDelegateMixin(DatetimelikeDelegateMixin): typ='property') @delegate_names(PeriodArray, PeriodDelegateMixin._delegated_methods, - typ="method") -class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index, - PeriodDelegateMixin): + typ="method", + overwrite=True) +class PeriodIndex(DatetimeIndexOpsMixin, Int64Index, PeriodDelegateMixin): """ Immutable ndarray holding ordinal values indicating regular periods in time such as particular years, quarters, months, etc. @@ -291,20 +290,15 @@ def _eadata(self): def values(self): return np.asarray(self) - @property - def _values(self): - return self._data - @property def freq(self): - # TODO(DatetimeArray): remove - # Can't simply use delegate_names since our base class is defining - # freq return self._data.freq @freq.setter def freq(self, value): value = Period._maybe_convert_freq(value) + # TODO: When this deprecation is enforced, PeriodIndex.freq can + # be removed entirely, and we'll just inherit. msg = ('Setting {cls}.freq has been deprecated and will be ' 'removed in a future version; use {cls}.asfreq instead. 
' 'The {cls}.freq setter is not guaranteed to work.') @@ -897,11 +891,6 @@ def flags(self): FutureWarning, stacklevel=2) return self._ndarray_values.flags - @property - def asi8(self): - # TODO(DatetimeArray): remove - return self.view('i8') - def item(self): """ return the first element of the underlying data as a python diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 6206a6a615d64..0798dd6eee0c9 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -70,8 +70,8 @@ class TimedeltaDelegateMixin(DatetimelikeDelegateMixin): @delegate_names(TimedeltaArray, TimedeltaDelegateMixin._delegated_methods, typ="method", overwrite=False) -class TimedeltaIndex(DatetimeIndexOpsMixin, - dtl.TimelikeOps, Int64Index, TimedeltaDelegateMixin): +class TimedeltaIndex(DatetimeIndexOpsMixin, dtl.TimelikeOps, Int64Index, + TimedeltaDelegateMixin): """ Immutable ndarray of timedelta64 data, represented internally as int64, and which can be boxed to timedelta objects @@ -238,7 +238,6 @@ def _simple_new(cls, values, name=None, freq=None, dtype=_TD_DTYPE): result._eadata = tdarr result.name = name # For groupby perf. 
See note in indexes/base about _index_data - # TODO: make sure this is updated correctly if edited result._index_data = tdarr._data result._reset_identity() diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index c9ed2521676ad..346f56968c963 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2165,7 +2165,7 @@ def should_store(self, value): class DatetimeLikeBlockMixin(object): - """Mixin class for DatetimeBlock and DatetimeTZBlock.""" + """Mixin class for DatetimeBlock, DatetimeTZBlock, and TimedeltaBlock.""" @property def _holder(self): @@ -2857,15 +2857,17 @@ def to_native_types(self, slicer=None, na_rep=None, date_format=None, """ convert to our native types format, slicing if desired """ values = self.values + i8values = self.values.view('i8') + if slicer is not None: - values = values[..., slicer] + i8values = i8values[..., slicer] from pandas.io.formats.format import _get_format_datetime64_from_values format = _get_format_datetime64_from_values(values, date_format) result = tslib.format_array_from_datetime( - values.view('i8').ravel(), tz=getattr(self.values, 'tz', None), - format=format, na_rep=na_rep).reshape(values.shape) + i8values.ravel(), tz=getattr(self.values, 'tz', None), + format=format, na_rep=na_rep).reshape(i8values.shape) return np.atleast_2d(result) def should_store(self, value): @@ -3115,8 +3117,16 @@ def get_block_type(values, dtype=None): dtype = dtype or values.dtype vtype = dtype.type - if is_categorical(values): + if is_sparse(dtype): + # Need this first(ish) so that Sparse[datetime] is sparse + cls = ExtensionBlock + elif is_categorical(values): cls = CategoricalBlock + elif issubclass(vtype, np.datetime64): + assert not is_datetime64tz_dtype(values) + cls = DatetimeBlock + elif is_datetime64tz_dtype(values): + cls = DatetimeTZBlock elif is_interval_dtype(dtype) or is_period_dtype(dtype): cls = ObjectValuesExtensionBlock elif is_extension_array_dtype(values): @@ -3128,11 
+3138,6 @@ def get_block_type(values, dtype=None): cls = TimeDeltaBlock elif issubclass(vtype, np.complexfloating): cls = ComplexBlock - elif issubclass(vtype, np.datetime64): - assert not is_datetime64tz_dtype(values) - cls = DatetimeBlock - elif is_datetime64tz_dtype(values): - cls = DatetimeTZBlock elif issubclass(vtype, np.integer): cls = IntBlock elif dtype == np.bool_: diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 7cab52ddda87f..e11f0ee01e57c 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -1539,17 +1539,20 @@ def wrapper(left, right): raise TypeError("{typ} cannot perform the operation " "{op}".format(typ=type(left).__name__, op=str_rep)) - elif (is_extension_array_dtype(left) or - (is_extension_array_dtype(right) and not is_scalar(right))): - # GH#22378 disallow scalar to exclude e.g. "category", "Int64" - return dispatch_to_extension_op(op, left, right) - elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left): + # Give dispatch_to_index_op a chance for tests like + # test_dt64_series_add_intlike, which the index dispatching handles + # specifically. result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex) return construct_result(left, result, index=left.index, name=res_name, dtype=result.dtype) + elif (is_extension_array_dtype(left) or + (is_extension_array_dtype(right) and not is_scalar(right))): + # GH#22378 disallow scalar to exclude e.g. 
"category", "Int64" + return dispatch_to_extension_op(op, left, right) + elif is_timedelta64_dtype(left): result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex) return construct_result(left, result, diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index 8a833d8197381..48b64c2968219 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -16,6 +16,14 @@ class TestDatetimeArrayConstructor(object): + def test_from_pandas_array(self): + arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9 + + result = DatetimeArray._from_sequence(arr, freq='infer') + + expected = pd.date_range('1970-01-01', periods=5, freq='H')._eadata + tm.assert_datetime_array_equal(result, expected) + def test_mismatched_timezone_raises(self): arr = DatetimeArray(np.array(['2000-01-01T06:00:00'], dtype='M8[ns]'), dtype=DatetimeTZDtype(tz='US/Central')) diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index a21d0104b0d04..6e006c1707604 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -3245,7 +3245,9 @@ def test_setitem(self): b1 = df._data.blocks[1] b2 = df._data.blocks[2] assert b1.values.equals(b2.values) - assert id(b1.values.values.base) != id(b2.values.values.base) + if b1.values.values.base is not None: + # base being None suffices to assure a copy was made + assert id(b1.values.values.base) != id(b2.values.values.base) # with nan df2 = df.copy()
docstrings, comments, edits that are correct both before and after #24024 the only substantive change is adding a test specifically for constructing a DatetimeArray from a PandasArray.
https://api.github.com/repos/pandas-dev/pandas/pulls/24543
2019-01-02T04:15:19Z
2019-01-02T14:16:52Z
2019-01-02T14:16:52Z
2019-01-02T14:44:30Z
CLN: use idiomatic pandas_dtypes in pandas/dtypes/common.py
diff --git a/asv_bench/benchmarks/dtypes.py b/asv_bench/benchmarks/dtypes.py new file mode 100644 index 0000000000000..e59154cd99965 --- /dev/null +++ b/asv_bench/benchmarks/dtypes.py @@ -0,0 +1,39 @@ +from pandas.api.types import pandas_dtype + +import numpy as np +from .pandas_vb_common import ( + numeric_dtypes, datetime_dtypes, string_dtypes, extension_dtypes) + + +_numpy_dtypes = [np.dtype(dtype) + for dtype in (numeric_dtypes + + datetime_dtypes + + string_dtypes)] +_dtypes = _numpy_dtypes + extension_dtypes + + +class Dtypes(object): + params = (_dtypes + + list(map(lambda dt: dt.name, _dtypes))) + param_names = ['dtype'] + + def time_pandas_dtype(self, dtype): + pandas_dtype(dtype) + + +class DtypesInvalid(object): + param_names = ['dtype'] + params = ['scalar-string', 'scalar-int', 'list-string', 'array-string'] + data_dict = {'scalar-string': 'foo', + 'scalar-int': 1, + 'list-string': ['foo'] * 1000, + 'array-string': np.array(['foo'] * 1000)} + + def time_pandas_dtype_invalid(self, dtype): + try: + pandas_dtype(self.data_dict[dtype]) + except TypeError: + pass + + +from .pandas_vb_common import setup # noqa: F401 diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py index e7b25d567e03b..ab5e5fd3bfe10 100644 --- a/asv_bench/benchmarks/pandas_vb_common.py +++ b/asv_bench/benchmarks/pandas_vb_common.py @@ -2,6 +2,7 @@ from importlib import import_module import numpy as np +import pandas as pd # Compatibility import for lib for imp in ['pandas._libs.lib', 'pandas.lib']: @@ -14,6 +15,15 @@ numeric_dtypes = [np.int64, np.int32, np.uint32, np.uint64, np.float32, np.float64, np.int16, np.int8, np.uint16, np.uint8] datetime_dtypes = [np.datetime64, np.timedelta64] +string_dtypes = [np.object] +extension_dtypes = [pd.Int8Dtype, pd.Int16Dtype, + pd.Int32Dtype, pd.Int64Dtype, + pd.UInt8Dtype, pd.UInt16Dtype, + pd.UInt32Dtype, pd.UInt64Dtype, + pd.CategoricalDtype, + pd.IntervalDtype, + pd.DatetimeTZDtype('ns', 'UTC'), + 
pd.PeriodDtype('D')] def setup(*args, **kwargs): diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 3a04789b609f8..78673a607b206 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -430,7 +430,7 @@ Backwards incompatible API changes - The column order of the resultant :class:`DataFrame` from :meth:`MultiIndex.to_frame` is now guaranteed to match the :attr:`MultiIndex.names` order. (:issue:`22420`) - Incorrectly passing a :class:`DatetimeIndex` to :meth:`MultiIndex.from_tuples`, rather than a sequence of tuples, now raises a ``TypeError`` rather than a ``ValueError`` (:issue:`24024`) - :func:`pd.offsets.generate_range` argument ``time_rule`` has been removed; use ``offset`` instead (:issue:`24157`) -- In 0.23.x, pandas would raise a ``ValueError`` on a merge of a numeric column (e.g. ``int`` dtyped column) and an ``object`` dtyped column (:issue:`9780`). We have re-enabled the ability to merge ``object`` and other dtypes (:issue:`21681`) +- In 0.23.x, pandas would raise a ``ValueError`` on a merge of a numeric column (e.g. ``int`` dtyped column) and an ``object`` dtyped column (:issue:`9780`). 
We have re-enabled the ability to merge ``object`` and other dtypes; pandas will still raise on a merge between a numeric and an ``object`` dtyped column that is composed only of strings (:issue:`21681`) Percentage change on groupby ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/pandas/conftest.py b/pandas/conftest.py index f383fb32810e7..30b24e00779a9 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -388,9 +388,14 @@ def tz_aware_fixture(request): return request.param +# ---------------------------------------------------------------- +# Dtypes UNSIGNED_INT_DTYPES = ["uint8", "uint16", "uint32", "uint64"] +UNSIGNED_EA_INT_DTYPES = ["UInt8", "UInt16", "UInt32", "UInt64"] SIGNED_INT_DTYPES = [int, "int8", "int16", "int32", "int64"] +SIGNED_EA_INT_DTYPES = ["Int8", "Int16", "Int32", "Int64"] ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES +ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES FLOAT_DTYPES = [float, "float32", "float64"] COMPLEX_DTYPES = [complex, "complex64", "complex128"] diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index af2c05bbee7c2..f8f87ff1c96f1 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -32,6 +32,7 @@ class _IntegerDtype(ExtensionDtype): The attributes name & type are set when these subclasses are created. 
""" name = None + base = None type = None na_value = np.nan @@ -153,6 +154,7 @@ def coerce_to_array(values, dtype, mask=None, copy=False): # Avoid DeprecationWarning from NumPy about np.dtype("Int64") # https://github.com/numpy/numpy/pull/7476 dtype = dtype.lower() + if not issubclass(type(dtype), _IntegerDtype): try: dtype = _dtypes[str(np.dtype(dtype))] @@ -655,7 +657,8 @@ def integer_arithmetic_method(self, other): else: name = dtype.capitalize() classname = "{}Dtype".format(name) - attributes_dict = {'type': getattr(np, dtype), + numpy_dtype = getattr(np, dtype) + attributes_dict = {'type': numpy_dtype, 'name': name} dtype_type = register_extension_dtype( type(classname, (_IntegerDtype, ), attributes_dict) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index b55bad46580fe..a67bdffc2aeb7 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -9,9 +9,9 @@ from pandas.compat import PY3, string_types, text_type, to_str from .common import ( - _INT64_DTYPE, _NS_DTYPE, _POSSIBLY_CAST_DTYPES, _TD_DTYPE, _string_dtypes, - ensure_int8, ensure_int16, ensure_int32, ensure_int64, ensure_object, - is_bool, is_bool_dtype, is_categorical_dtype, is_complex, is_complex_dtype, + _INT64_DTYPE, _NS_DTYPE, _POSSIBLY_CAST_DTYPES, _TD_DTYPE, ensure_int8, + ensure_int16, ensure_int32, ensure_int64, ensure_object, is_bool, + is_bool_dtype, is_categorical_dtype, is_complex, is_complex_dtype, is_datetime64_dtype, is_datetime64_ns_dtype, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_datetimelike, is_dtype_equal, is_extension_array_dtype, is_extension_type, is_float, is_float_dtype, @@ -544,7 +544,7 @@ def invalidate_string_dtypes(dtype_set): """Change string like dtypes to object for ``DataFrame.select_dtypes()``. 
""" - non_string_dtypes = dtype_set - _string_dtypes + non_string_dtypes = dtype_set - {np.dtype('S').type, np.dtype('<U').type} if non_string_dtypes != dtype_set: raise TypeError("string dtypes are not allowed, use 'object' instead") diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index b4c769fab88ad..507dacb5322a6 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -4,17 +4,15 @@ import numpy as np from pandas._libs import algos, lib -from pandas._libs.interval import Interval -from pandas._libs.tslibs import Period, Timestamp, conversion -from pandas.compat import PY3, PY36, binary_type, string_types, text_type +from pandas._libs.tslibs import conversion +from pandas.compat import PY3, PY36, string_types from pandas.core.dtypes.dtypes import ( - CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype, ExtensionDtype, - IntervalDtype, PandasExtensionDtype, PeriodDtype, registry) + CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, + PandasExtensionDtype, PeriodDtype, registry) from pandas.core.dtypes.generic import ( - ABCCategorical, ABCCategoricalIndex, ABCDateOffset, ABCDatetimeIndex, - ABCIndexClass, ABCPeriodArray, ABCPeriodIndex, ABCSeries, ABCSparseArray, - ABCSparseSeries) + ABCCategorical, ABCDateOffset, ABCDatetimeIndex, ABCIndexClass, + ABCPeriodArray, ABCPeriodIndex, ABCSeries) from pandas.core.dtypes.inference import ( # noqa:F401 is_array_like, is_bool, is_complex, is_decimal, is_dict_like, is_file_like, is_float, is_hashable, is_integer, is_interval, is_iterator, is_list_like, @@ -116,6 +114,20 @@ def ensure_int64_or_float64(arr, copy=False): return arr.astype('float64', copy=copy) +def classes(*klasses): + """ evaluate if the tipo is a subclass of the klasses """ + return lambda tipo: issubclass(tipo, klasses) + + +def classes_and_not_datetimelike(*klasses): + """ + evaluate if the tipo is a subclass of the klasses + and not a datetimelike + """ + return lambda tipo: 
(issubclass(tipo, klasses) and + not issubclass(tipo, (np.datetime64, np.timedelta64))) + + def is_object_dtype(arr_or_dtype): """ Check whether an array-like or dtype is of the object dtype. @@ -142,11 +154,7 @@ def is_object_dtype(arr_or_dtype): >>> is_object_dtype([1, 2, 3]) False """ - - if arr_or_dtype is None: - return False - tipo = _get_dtype_type(arr_or_dtype) - return issubclass(tipo, np.object_) + return _is_dtype_type(arr_or_dtype, classes(np.object_)) def is_sparse(arr): @@ -420,13 +428,7 @@ def is_datetime64_dtype(arr_or_dtype): False """ - if arr_or_dtype is None: - return False - try: - tipo = _get_dtype_type(arr_or_dtype) - except (TypeError, UnicodeEncodeError): - return False - return issubclass(tipo, np.datetime64) + return _is_dtype_type(arr_or_dtype, classes(np.datetime64)) def is_datetime64tz_dtype(arr_or_dtype): @@ -495,13 +497,7 @@ def is_timedelta64_dtype(arr_or_dtype): False """ - if arr_or_dtype is None: - return False - try: - tipo = _get_dtype_type(arr_or_dtype) - except (TypeError, ValueError, SyntaxError): - return False - return issubclass(tipo, np.timedelta64) + return _is_dtype_type(arr_or_dtype, classes(np.timedelta64)) def is_period_dtype(arr_or_dtype): @@ -635,14 +631,9 @@ def is_string_dtype(arr_or_dtype): """ # TODO: gh-15585: consider making the checks stricter. - - if arr_or_dtype is None: - return False - try: - dtype = _get_dtype(arr_or_dtype) + def condition(dtype): return dtype.kind in ('O', 'S', 'U') and not is_period_dtype(dtype) - except TypeError: - return False + return _is_dtype(arr_or_dtype, condition) def is_period_arraylike(arr): @@ -832,6 +823,11 @@ def is_any_int_dtype(arr_or_dtype): This function is internal and should not be exposed in the public API. + .. versionchanged:: 0.24.0 + + The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered + as integer by this function. 
+ Parameters ---------- arr_or_dtype : array-like @@ -865,10 +861,8 @@ def is_any_int_dtype(arr_or_dtype): False """ - if arr_or_dtype is None: - return False - tipo = _get_dtype_type(arr_or_dtype) - return issubclass(tipo, np.integer) + return _is_dtype_type( + arr_or_dtype, classes(np.integer, np.timedelta64)) def is_integer_dtype(arr_or_dtype): @@ -877,6 +871,11 @@ def is_integer_dtype(arr_or_dtype): Unlike in `in_any_int_dtype`, timedelta64 instances will return False. + .. versionchanged:: 0.24.0 + + The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered + as integer by this function. + Parameters ---------- arr_or_dtype : array-like @@ -897,6 +896,12 @@ def is_integer_dtype(arr_or_dtype): False >>> is_integer_dtype(np.uint64) True + >>> is_integer_dtype('int8') + True + >>> is_integer_dtype('Int8') + True + >>> is_integer_dtype(pd.Int8Dtype) + True >>> is_integer_dtype(np.datetime64) False >>> is_integer_dtype(np.timedelta64) @@ -911,11 +916,8 @@ def is_integer_dtype(arr_or_dtype): False """ - if arr_or_dtype is None: - return False - tipo = _get_dtype_type(arr_or_dtype) - return (issubclass(tipo, np.integer) and - not issubclass(tipo, (np.datetime64, np.timedelta64))) + return _is_dtype_type( + arr_or_dtype, classes_and_not_datetimelike(np.integer)) def is_signed_integer_dtype(arr_or_dtype): @@ -924,6 +926,11 @@ def is_signed_integer_dtype(arr_or_dtype): Unlike in `in_any_int_dtype`, timedelta64 instances will return False. + .. versionchanged:: 0.24.0 + + The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered + as integer by this function. 
+ Parameters ---------- arr_or_dtype : array-like @@ -944,6 +951,12 @@ def is_signed_integer_dtype(arr_or_dtype): False >>> is_signed_integer_dtype(np.uint64) # unsigned False + >>> is_signed_integer_dtype('int8') + True + >>> is_signed_integer_dtype('Int8') + True + >>> is_signed_dtype(pd.Int8Dtype) + True >>> is_signed_integer_dtype(np.datetime64) False >>> is_signed_integer_dtype(np.timedelta64) @@ -960,17 +973,19 @@ def is_signed_integer_dtype(arr_or_dtype): False """ - if arr_or_dtype is None: - return False - tipo = _get_dtype_type(arr_or_dtype) - return (issubclass(tipo, np.signedinteger) and - not issubclass(tipo, (np.datetime64, np.timedelta64))) + return _is_dtype_type( + arr_or_dtype, classes_and_not_datetimelike(np.signedinteger)) def is_unsigned_integer_dtype(arr_or_dtype): """ Check whether the provided array or dtype is of an unsigned integer dtype. + .. versionchanged:: 0.24.0 + + The nullable Integer dtypes (e.g. pandas.UInt64Dtype) are also + considered as integer by this function. 
+ Parameters ---------- arr_or_dtype : array-like @@ -991,6 +1006,12 @@ def is_unsigned_integer_dtype(arr_or_dtype): False >>> is_unsigned_integer_dtype(np.uint64) True + >>> is_unsigned_integer_dtype('uint8') + True + >>> is_unsigned_integer_dtype('UInt8') + True + >>> is_unsigned_integer_dtype(pd.UInt8Dtype) + True >>> is_unsigned_integer_dtype(np.array(['a', 'b'])) False >>> is_unsigned_integer_dtype(pd.Series([1, 2])) # signed @@ -1000,12 +1021,8 @@ def is_unsigned_integer_dtype(arr_or_dtype): >>> is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32)) True """ - - if arr_or_dtype is None: - return False - tipo = _get_dtype_type(arr_or_dtype) - return (issubclass(tipo, np.unsignedinteger) and - not issubclass(tipo, (np.datetime64, np.timedelta64))) + return _is_dtype_type( + arr_or_dtype, classes_and_not_datetimelike(np.unsignedinteger)) def is_int64_dtype(arr_or_dtype): @@ -1035,6 +1052,12 @@ def is_int64_dtype(arr_or_dtype): False >>> is_int64_dtype(np.int64) True + >>> is_int64_dtype('int8') + False + >>> is_int64_dtype('Int8') + False + >>> is_int64_dtype(pd.Int64Dtype) + True >>> is_int64_dtype(float) False >>> is_int64_dtype(np.uint64) # unsigned @@ -1049,10 +1072,7 @@ def is_int64_dtype(arr_or_dtype): False """ - if arr_or_dtype is None: - return False - tipo = _get_dtype_type(arr_or_dtype) - return issubclass(tipo, np.int64) + return _is_dtype_type(arr_or_dtype, classes(np.int64)) def is_datetime64_any_dtype(arr_or_dtype): @@ -1172,14 +1192,7 @@ def is_timedelta64_ns_dtype(arr_or_dtype): >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64)) False """ - - if arr_or_dtype is None: - return False - try: - tipo = _get_dtype(arr_or_dtype) - return tipo == _TD_DTYPE - except TypeError: - return False + return _is_dtype(arr_or_dtype, lambda dtype: dtype == _TD_DTYPE) def is_datetime_or_timedelta_dtype(arr_or_dtype): @@ -1217,10 +1230,8 @@ def is_datetime_or_timedelta_dtype(arr_or_dtype): True """ - if arr_or_dtype is None: - return False - 
tipo = _get_dtype_type(arr_or_dtype) - return issubclass(tipo, (np.datetime64, np.timedelta64)) + return _is_dtype_type( + arr_or_dtype, classes(np.datetime64, np.timedelta64)) def _is_unorderable_exception(e): @@ -1495,11 +1506,8 @@ def is_numeric_dtype(arr_or_dtype): False """ - if arr_or_dtype is None: - return False - tipo = _get_dtype_type(arr_or_dtype) - return (issubclass(tipo, (np.number, np.bool_)) and - not issubclass(tipo, (np.datetime64, np.timedelta64))) + return _is_dtype_type( + arr_or_dtype, classes_and_not_datetimelike(np.number, np.bool_)) def is_string_like_dtype(arr_or_dtype): @@ -1530,13 +1538,8 @@ def is_string_like_dtype(arr_or_dtype): False """ - if arr_or_dtype is None: - return False - try: - dtype = _get_dtype(arr_or_dtype) - return dtype.kind in ('S', 'U') - except TypeError: - return False + return _is_dtype( + arr_or_dtype, lambda dtype: dtype.kind in ('S', 'U')) def is_float_dtype(arr_or_dtype): @@ -1569,11 +1572,7 @@ def is_float_dtype(arr_or_dtype): >>> is_float_dtype(pd.Index([1, 2.])) True """ - - if arr_or_dtype is None: - return False - tipo = _get_dtype_type(arr_or_dtype) - return issubclass(tipo, np.floating) + return _is_dtype_type(arr_or_dtype, classes(np.floating)) def is_bool_dtype(arr_or_dtype): @@ -1618,14 +1617,10 @@ def is_bool_dtype(arr_or_dtype): if arr_or_dtype is None: return False try: - tipo = _get_dtype_type(arr_or_dtype) - except ValueError: - # this isn't even a dtype + dtype = _get_dtype(arr_or_dtype) + except TypeError: return False - if isinstance(arr_or_dtype, (ABCCategorical, ABCCategoricalIndex)): - arr_or_dtype = arr_or_dtype.dtype - if isinstance(arr_or_dtype, CategoricalDtype): arr_or_dtype = arr_or_dtype.categories # now we use the special definition for Index @@ -1642,7 +1637,7 @@ def is_bool_dtype(arr_or_dtype): dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype) return dtype._is_boolean - return issubclass(tipo, np.bool_) + return issubclass(dtype.type, np.bool_) def is_extension_type(arr): @@ 
-1761,10 +1756,32 @@ def is_complex_dtype(arr_or_dtype): True """ + return _is_dtype_type(arr_or_dtype, classes(np.complexfloating)) + + +def _is_dtype(arr_or_dtype, condition): + """ + Return a boolean if the condition is satisfied for the arr_or_dtype. + + Parameters + ---------- + arr_or_dtype : array-like, str, np.dtype, or ExtensionArrayType + The array-like or dtype object whose dtype we want to extract. + condition : callable[Union[np.dtype, ExtensionDtype]] + + Returns + ------- + bool + + """ + if arr_or_dtype is None: return False - tipo = _get_dtype_type(arr_or_dtype) - return issubclass(tipo, np.complexfloating) + try: + dtype = _get_dtype(arr_or_dtype) + except (TypeError, ValueError, UnicodeEncodeError): + return False + return condition(dtype) def _get_dtype(arr_or_dtype): @@ -1787,95 +1804,70 @@ def _get_dtype(arr_or_dtype): TypeError : The passed in object is None. """ - # TODO(extension) - # replace with pandas_dtype - if arr_or_dtype is None: raise TypeError("Cannot deduce dtype from null object") - if isinstance(arr_or_dtype, np.dtype): + + # fastpath + elif isinstance(arr_or_dtype, np.dtype): return arr_or_dtype elif isinstance(arr_or_dtype, type): return np.dtype(arr_or_dtype) - elif isinstance(arr_or_dtype, ExtensionDtype): - return arr_or_dtype - elif isinstance(arr_or_dtype, DatetimeTZDtype): - return arr_or_dtype - elif isinstance(arr_or_dtype, PeriodDtype): - return arr_or_dtype - elif isinstance(arr_or_dtype, IntervalDtype): - return arr_or_dtype - elif isinstance(arr_or_dtype, string_types): - if is_categorical_dtype(arr_or_dtype): - return CategoricalDtype.construct_from_string(arr_or_dtype) - elif is_datetime64tz_dtype(arr_or_dtype): - return DatetimeTZDtype.construct_from_string(arr_or_dtype) - elif is_period_dtype(arr_or_dtype): - return PeriodDtype.construct_from_string(arr_or_dtype) - elif is_interval_dtype(arr_or_dtype): - return IntervalDtype.construct_from_string(arr_or_dtype) - elif isinstance(arr_or_dtype, (ABCCategorical, 
ABCCategoricalIndex, - ABCSparseArray, ABCSparseSeries)): - return arr_or_dtype.dtype - if hasattr(arr_or_dtype, 'dtype'): + # if we have an array-like + elif hasattr(arr_or_dtype, 'dtype'): arr_or_dtype = arr_or_dtype.dtype - return np.dtype(arr_or_dtype) + return pandas_dtype(arr_or_dtype) -def _get_dtype_type(arr_or_dtype): + +def _is_dtype_type(arr_or_dtype, condition): """ - Get the type (NOT dtype) instance associated with - an array or dtype object. + Return a boolean if the condition is satisfied for the arr_or_dtype. Parameters ---------- arr_or_dtype : array-like - The array-like or dtype object whose type we want to extract. + The array-like or dtype object whose dtype we want to extract. + condition : callable[Union[np.dtype, ExtensionDtypeType]] Returns ------- - obj_type : The extract type instance from the - passed in array or dtype object. + bool : if the condition is satisifed for the arr_or_dtype """ - # TODO(extension) - # replace with pandas_dtype + if arr_or_dtype is None: + return condition(type(None)) + + # fastpath if isinstance(arr_or_dtype, np.dtype): - return arr_or_dtype.type + return condition(arr_or_dtype.type) elif isinstance(arr_or_dtype, type): - return np.dtype(arr_or_dtype).type - elif isinstance(arr_or_dtype, CategoricalDtype): - return CategoricalDtypeType - elif isinstance(arr_or_dtype, DatetimeTZDtype): - return Timestamp - elif isinstance(arr_or_dtype, IntervalDtype): - return Interval - elif isinstance(arr_or_dtype, PeriodDtype): - return Period - elif isinstance(arr_or_dtype, string_types): - if is_categorical_dtype(arr_or_dtype): - return CategoricalDtypeType - elif is_datetime64tz_dtype(arr_or_dtype): - return Timestamp - elif is_period_dtype(arr_or_dtype): - return Period - elif is_interval_dtype(arr_or_dtype): - return Interval - return _get_dtype_type(np.dtype(arr_or_dtype)) - else: - from pandas.core.arrays.sparse import SparseDtype - if isinstance(arr_or_dtype, (ABCSparseSeries, - ABCSparseArray, - SparseDtype)): - 
dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype) - return dtype.type + if issubclass(arr_or_dtype, (PandasExtensionDtype, ExtensionDtype)): + arr_or_dtype = arr_or_dtype.type + return condition(np.dtype(arr_or_dtype).type) + elif arr_or_dtype is None: + return condition(type(None)) + + # if we have an array-like + if hasattr(arr_or_dtype, 'dtype'): + arr_or_dtype = arr_or_dtype.dtype + + # we are not possibly a dtype + elif is_list_like(arr_or_dtype): + return condition(type(None)) + try: - return arr_or_dtype.dtype.type - except AttributeError: - return type(None) + tipo = pandas_dtype(arr_or_dtype).type + except (TypeError, ValueError, UnicodeEncodeError): + if is_scalar(arr_or_dtype): + return condition(type(None)) + + return False + + return condition(tipo) -def _get_dtype_from_object(dtype): +def infer_dtype_from_object(dtype): """ Get a numpy dtype.type-style object for a dtype object. @@ -1898,18 +1890,26 @@ def _get_dtype_from_object(dtype): if isinstance(dtype, type) and issubclass(dtype, np.generic): # Type object from a dtype return dtype - elif is_categorical(dtype): - return CategoricalDtype().type - elif is_datetime64tz_dtype(dtype): - return DatetimeTZDtype(dtype).type - elif isinstance(dtype, np.dtype): # dtype object + elif isinstance(dtype, (np.dtype, PandasExtensionDtype, ExtensionDtype)): + # dtype object try: _validate_date_like_dtype(dtype) except TypeError: # Should still pass if we don't have a date-like pass return dtype.type + + try: + dtype = pandas_dtype(dtype) + except TypeError: + pass + + if is_extension_array_dtype(dtype): + return dtype.type elif isinstance(dtype, string_types): + + # TODO(jreback) + # should deprecate these if dtype in ['datetimetz', 'datetime64tz']: return DatetimeTZDtype.type elif dtype in ['period']: @@ -1917,9 +1917,8 @@ def _get_dtype_from_object(dtype): if dtype == 'datetime' or dtype == 'timedelta': dtype += '64' - try: - return _get_dtype_from_object(getattr(np, dtype)) + return 
infer_dtype_from_object(getattr(np, dtype)) except (AttributeError, TypeError): # Handles cases like _get_dtype(int) i.e., # Python objects that are valid dtypes @@ -1929,7 +1928,7 @@ def _get_dtype_from_object(dtype): # further handle internal types pass - return _get_dtype_from_object(np.dtype(dtype)) + return infer_dtype_from_object(np.dtype(dtype)) def _validate_date_like_dtype(dtype): @@ -1957,10 +1956,6 @@ def _validate_date_like_dtype(dtype): raise ValueError(msg.format(name=dtype.name, type=dtype.type.__name__)) -_string_dtypes = frozenset(map(_get_dtype_from_object, (binary_type, - text_type))) - - def pandas_dtype(dtype): """ Converts input into a pandas only dtype object or a numpy dtype object. @@ -1980,7 +1975,7 @@ def pandas_dtype(dtype): # short-circuit if isinstance(dtype, np.ndarray): return dtype.dtype - elif isinstance(dtype, np.dtype): + elif isinstance(dtype, (np.dtype, PandasExtensionDtype, ExtensionDtype)): return dtype # registered extension types @@ -1988,10 +1983,6 @@ def pandas_dtype(dtype): if result is not None: return result - # un-registered extension types - elif isinstance(dtype, (PandasExtensionDtype, ExtensionDtype)): - return dtype - # try a numpy dtype # raise a consistent TypeError if failed try: diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index e6967ed2a4d3d..aada777decaa7 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -9,8 +9,7 @@ from pandas.core.dtypes.common import ( _NS_DTYPE, _TD_DTYPE, is_bool_dtype, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_dtype_equal, - is_extension_array_dtype, is_interval_dtype, is_object_dtype, - is_period_dtype, is_sparse, is_timedelta64_dtype) + is_extension_array_dtype, is_object_dtype, is_sparse, is_timedelta64_dtype) from pandas.core.dtypes.generic import ( ABCDatetimeArray, ABCDatetimeIndex, ABCIndexClass, ABCPeriodIndex, ABCRangeIndex, ABCSparseDataFrame, ABCTimedeltaIndex) @@ -51,9 +50,7 @@ def 
get_dtype_kinds(l): typ = 'object' elif is_bool_dtype(dtype): typ = 'bool' - elif is_period_dtype(dtype): - typ = str(arr.dtype) - elif is_interval_dtype(dtype): + elif is_extension_array_dtype(dtype): typ = str(arr.dtype) else: typ = dtype.kind @@ -136,7 +133,6 @@ def is_nonempty(x): # np.concatenate which has them both implemented is compiled. typs = get_dtype_kinds(to_concat) - _contains_datetime = any(typ.startswith('datetime') for typ in typs) _contains_period = any(typ.startswith('period') for typ in typs) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 76d3d704497b4..a50def7357826 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -60,7 +60,7 @@ is_scalar, is_dtype_equal, needs_i8_conversion, - _get_dtype_from_object, + infer_dtype_from_object, ensure_float64, ensure_int64, ensure_platform_int, @@ -3292,7 +3292,7 @@ def _get_info_slice(obj, indexer): # convert the myriad valid dtypes object to a single representation include, exclude = map( - lambda x: frozenset(map(_get_dtype_from_object, x)), selection) + lambda x: frozenset(map(infer_dtype_from_object, x)), selection) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 9d6a56200df6e..379464f4fced6 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -7,8 +7,8 @@ from pandas.util._decorators import Appender, cache_readonly from pandas.core.dtypes.common import ( - is_bool, is_bool_dtype, is_dtype_equal, is_float, is_integer_dtype, - is_scalar, needs_i8_conversion, pandas_dtype) + is_bool, is_bool_dtype, is_dtype_equal, is_extension_array_dtype, is_float, + is_integer_dtype, is_scalar, needs_i8_conversion, pandas_dtype) import pandas.core.dtypes.concat as _concat from pandas.core.dtypes.missing import isna @@ -328,7 +328,9 @@ def astype(self, dtype, copy=True): msg = ('Cannot convert Float64Index to dtype {dtype}; integer ' 'values are required for 
conversion').format(dtype=dtype) raise TypeError(msg) - elif is_integer_dtype(dtype) and self.hasnans: + elif (is_integer_dtype(dtype) and + not is_extension_array_dtype(dtype)) and self.hasnans: + # TODO(jreback); this can change once we have an EA Index type # GH 13149 raise ValueError('Cannot convert NA to integer') return super(Float64Index, self).astype(dtype, copy=copy) diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 067b95f9d8847..4a16707a376e9 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -11,8 +11,8 @@ from pandas.core.dtypes.cast import maybe_promote from pandas.core.dtypes.common import ( _get_dtype, is_categorical_dtype, is_datetime64_dtype, - is_datetime64tz_dtype, is_float_dtype, is_numeric_dtype, is_sparse, - is_timedelta64_dtype) + is_datetime64tz_dtype, is_extension_array_dtype, is_float_dtype, + is_numeric_dtype, is_sparse, is_timedelta64_dtype) import pandas.core.dtypes.concat as _concat from pandas.core.dtypes.missing import isna @@ -306,6 +306,8 @@ def get_empty_dtype_and_na(join_units): upcast_cls = 'timedelta' elif is_sparse(dtype): upcast_cls = dtype.subtype.name + elif is_extension_array_dtype(dtype): + upcast_cls = 'object' elif is_float_dtype(dtype) or is_numeric_dtype(dtype): upcast_cls = dtype.name else: diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index f62a4f8b5fba2..878a417b46674 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -560,11 +560,12 @@ def sanitize_array(data, index, dtype=None, copy=False, # possibility of nan -> garbage if is_float_dtype(data.dtype) and is_integer_dtype(dtype): - if not isna(data).any(): + try: subarr = _try_cast(data, True, dtype, copy, - raise_cast_failure) - elif copy: - subarr = data.copy() + True) + except ValueError: + if copy: + subarr = data.copy() else: subarr = _try_cast(data, True, dtype, copy, raise_cast_failure) elif 
isinstance(data, Index): diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index 5fcf19b0b12e7..f0f77b4977610 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -7,13 +7,28 @@ import pandas.core.dtypes.common as com from pandas.core.dtypes.dtypes import ( - CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype) + CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype, IntervalDtype, + PeriodDtype) import pandas as pd +from pandas.conftest import ( + ALL_EA_INT_DTYPES, ALL_INT_DTYPES, SIGNED_EA_INT_DTYPES, SIGNED_INT_DTYPES, + UNSIGNED_EA_INT_DTYPES, UNSIGNED_INT_DTYPES) from pandas.core.sparse.api import SparseDtype import pandas.util.testing as tm +# EA & Actual Dtypes +def to_ea_dtypes(dtypes): + """ convert list of string dtypes to EA dtype """ + return [getattr(pd, dt + 'Dtype') for dt in dtypes] + + +def to_numpy_dtypes(dtypes): + """ convert list of string dtypes to numpy dtype """ + return [getattr(np, dt) for dt in dtypes if isinstance(dt, str)] + + class TestPandasDtype(object): # Passing invalid dtype, both as a string or object, must raise TypeError @@ -278,58 +293,80 @@ def test_is_datetimelike(): assert com.is_datetimelike(s) -def test_is_integer_dtype(): - assert not com.is_integer_dtype(str) - assert not com.is_integer_dtype(float) - assert not com.is_integer_dtype(np.datetime64) - assert not com.is_integer_dtype(np.timedelta64) - assert not com.is_integer_dtype(pd.Index([1, 2.])) - assert not com.is_integer_dtype(np.array(['a', 'b'])) - assert not com.is_integer_dtype(np.array([], dtype=np.timedelta64)) - - assert com.is_integer_dtype(int) - assert com.is_integer_dtype(np.uint64) - assert com.is_integer_dtype(pd.Series([1, 2])) - - -def test_is_signed_integer_dtype(): - assert not com.is_signed_integer_dtype(str) - assert not com.is_signed_integer_dtype(float) - assert not com.is_signed_integer_dtype(np.uint64) - assert not com.is_signed_integer_dtype(np.datetime64) 
- assert not com.is_signed_integer_dtype(np.timedelta64) - assert not com.is_signed_integer_dtype(pd.Index([1, 2.])) - assert not com.is_signed_integer_dtype(np.array(['a', 'b'])) - assert not com.is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32)) - assert not com.is_signed_integer_dtype(np.array([], dtype=np.timedelta64)) - - assert com.is_signed_integer_dtype(int) - assert com.is_signed_integer_dtype(pd.Series([1, 2])) - - -def test_is_unsigned_integer_dtype(): - assert not com.is_unsigned_integer_dtype(str) - assert not com.is_unsigned_integer_dtype(int) - assert not com.is_unsigned_integer_dtype(float) - assert not com.is_unsigned_integer_dtype(pd.Series([1, 2])) - assert not com.is_unsigned_integer_dtype(pd.Index([1, 2.])) - assert not com.is_unsigned_integer_dtype(np.array(['a', 'b'])) - - assert com.is_unsigned_integer_dtype(np.uint64) - assert com.is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32)) - - -def test_is_int64_dtype(): - assert not com.is_int64_dtype(str) - assert not com.is_int64_dtype(float) - assert not com.is_int64_dtype(np.int32) - assert not com.is_int64_dtype(np.uint64) - assert not com.is_int64_dtype(pd.Index([1, 2.])) - assert not com.is_int64_dtype(np.array(['a', 'b'])) - assert not com.is_int64_dtype(np.array([1, 2], dtype=np.uint32)) - - assert com.is_int64_dtype(np.int64) - assert com.is_int64_dtype(np.array([1, 2], dtype=np.int64)) +@pytest.mark.parametrize( + 'dtype', [ + pd.Series([1, 2])] + + ALL_INT_DTYPES + to_numpy_dtypes(ALL_INT_DTYPES) + + ALL_EA_INT_DTYPES + to_ea_dtypes(ALL_EA_INT_DTYPES)) +def test_is_integer_dtype(dtype): + assert com.is_integer_dtype(dtype) + + +@pytest.mark.parametrize( + 'dtype', [str, float, np.datetime64, np.timedelta64, + pd.Index([1, 2.]), np.array(['a', 'b']), + np.array([], dtype=np.timedelta64)]) +def test_is_not_integer_dtype(dtype): + assert not com.is_integer_dtype(dtype) + + +@pytest.mark.parametrize( + 'dtype', [ + pd.Series([1, 2])] + + SIGNED_INT_DTYPES + 
to_numpy_dtypes(SIGNED_INT_DTYPES) + + SIGNED_EA_INT_DTYPES + to_ea_dtypes(SIGNED_EA_INT_DTYPES)) +def test_is_signed_integer_dtype(dtype): + assert com.is_integer_dtype(dtype) + + +@pytest.mark.parametrize( + 'dtype', + [ + str, float, np.datetime64, np.timedelta64, + pd.Index([1, 2.]), np.array(['a', 'b']), + np.array([], dtype=np.timedelta64)] + + UNSIGNED_INT_DTYPES + to_numpy_dtypes(UNSIGNED_INT_DTYPES) + + UNSIGNED_EA_INT_DTYPES + to_ea_dtypes(UNSIGNED_EA_INT_DTYPES)) +def test_is_not_signed_integer_dtype(dtype): + assert not com.is_signed_integer_dtype(dtype) + + +@pytest.mark.parametrize( + 'dtype', + [pd.Series([1, 2], dtype=np.uint32)] + + UNSIGNED_INT_DTYPES + to_numpy_dtypes(UNSIGNED_INT_DTYPES) + + UNSIGNED_EA_INT_DTYPES + to_ea_dtypes(UNSIGNED_EA_INT_DTYPES)) +def test_is_unsigned_integer_dtype(dtype): + assert com.is_unsigned_integer_dtype(dtype) + + +@pytest.mark.parametrize( + 'dtype', + [ + str, float, np.datetime64, np.timedelta64, + pd.Index([1, 2.]), np.array(['a', 'b']), + np.array([], dtype=np.timedelta64)] + + SIGNED_INT_DTYPES + to_numpy_dtypes(SIGNED_INT_DTYPES) + + SIGNED_EA_INT_DTYPES + to_ea_dtypes(SIGNED_EA_INT_DTYPES)) +def test_is_not_unsigned_integer_dtype(dtype): + assert not com.is_unsigned_integer_dtype(dtype) + + +@pytest.mark.parametrize( + 'dtype', + [np.int64, np.array([1, 2], dtype=np.int64), 'Int64', pd.Int64Dtype]) +def test_is_int64_dtype(dtype): + assert com.is_int64_dtype(dtype) + + +@pytest.mark.parametrize( + 'dtype', + [ + str, float, np.int32, np.uint64, pd.Index([1, 2.]), + np.array(['a', 'b']), np.array([1, 2], dtype=np.uint32), + 'int8', 'Int8', pd.Int8Dtype]) +def test_is_not_int64_dtype(dtype): + assert not com.is_int64_dtype(dtype) def test_is_datetime64_any_dtype(): @@ -375,6 +412,8 @@ def test_is_datetime_or_timedelta_dtype(): assert not com.is_datetime_or_timedelta_dtype(str) assert not com.is_datetime_or_timedelta_dtype(pd.Series([1, 2])) assert not com.is_datetime_or_timedelta_dtype(np.array(['a', 'b'])) 
+ + # TODO(jreback), this is sligthly suspect assert not com.is_datetime_or_timedelta_dtype( DatetimeTZDtype("ns", "US/Eastern")) @@ -588,11 +627,11 @@ def test__get_dtype_fails(input_param): (pd.Series(['a', 'b']), np.object_), (pd.Index([1, 2], dtype='int64'), np.int64), (pd.Index(['a', 'b']), np.object_), - ('category', com.CategoricalDtypeType), - (pd.Categorical(['a', 'b']).dtype, com.CategoricalDtypeType), - (pd.Categorical(['a', 'b']), com.CategoricalDtypeType), - (pd.CategoricalIndex(['a', 'b']).dtype, com.CategoricalDtypeType), - (pd.CategoricalIndex(['a', 'b']), com.CategoricalDtypeType), + ('category', CategoricalDtypeType), + (pd.Categorical(['a', 'b']).dtype, CategoricalDtypeType), + (pd.Categorical(['a', 'b']), CategoricalDtypeType), + (pd.CategoricalIndex(['a', 'b']).dtype, CategoricalDtypeType), + (pd.CategoricalIndex(['a', 'b']), CategoricalDtypeType), (pd.DatetimeIndex([1, 2]), np.datetime64), (pd.DatetimeIndex([1, 2]).dtype, np.datetime64), ('<M8[ns]', np.datetime64), @@ -610,5 +649,5 @@ def test__get_dtype_fails(input_param): (1.2, type(None)), (pd.DataFrame([1, 2]), type(None)), # composite dtype ]) -def test__get_dtype_type(input_param, result): - assert com._get_dtype_type(input_param) == result +def test__is_dtype_type(input_param, result): + assert com._is_dtype_type(input_param, lambda tipo: tipo == result)
closes #24593 Some benchmarks of ``pandas_dtype`` construction from a dtype object & strings, only thing slightly suprising is ``Period[D]`` ``` [ 87.50%] ··· ============================================ ============= dtype -------------------------------------------- ------------- dtype('int64') 463±20ns dtype('int32') 450±20ns dtype('uint32') 444±4ns dtype('uint64') 484±20ns dtype('float32') 494±30ns dtype('float64') 507±30ns dtype('int16') 471±30ns dtype('int8') 506±30ns dtype('uint16') 505±40ns dtype('uint8') 485±20ns dtype('<M8') 451±20ns dtype('<m8') 480±20ns dtype('O') 627±200ns pandas.core.arrays.integer.Int8Dtype 995±60ns pandas.core.arrays.integer.Int16Dtype 944±60ns pandas.core.arrays.integer.Int32Dtype 939±70ns pandas.core.arrays.integer.Int64Dtype 988±30ns pandas.core.arrays.integer.UInt8Dtype 924±50ns pandas.core.arrays.integer.UInt16Dtype 954±60ns pandas.core.arrays.integer.UInt32Dtype 966±70ns pandas.core.arrays.integer.UInt64Dtype 1.00±0.06μs pandas.core.dtypes.dtypes.CategoricalDtype 978±30ns pandas.core.dtypes.dtypes.IntervalDtype 929±300ns datetime64[ns, UTC] 1.52±0.03μs period[D] 958±9ns int64 16.1±0.1μs int32 16.1±0.6μs uint32 16.0±0.1μs uint64 16.2±0.3μs float32 15.8±0.4μs float64 15.7±0.6μs int16 16.0±0.2μs int8 15.7±0.06μs uint16 15.9±0.08μs uint8 17.3±0.6μs datetime64 16.2±0.3μs timedelta64 16.0±0.1μs object 16.0±0.2μs Int8 6.24±0.1μs Int16 7.41±0.2μs Int32 8.15±0.04μs Int64 9.39±0.1μs UInt8 10.1±0.1μs UInt16 11.0±0.04μs UInt32 11.8±0.5μs UInt64 12.7±0.08μs category 2.62±0.03μs interval 6.72±0.2μs datetime64[ns, UTC] 2.93±0.1μs period[D] 52.6±0.6μs ============================================ ============= ```
https://api.github.com/repos/pandas-dev/pandas/pulls/24541
2019-01-02T01:02:23Z
2019-01-04T13:55:44Z
2019-01-04T13:55:44Z
2019-01-04T13:55:44Z
WIP: make _holder changeover
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index 9662c59dddf4c..d0caeb3333548 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -228,6 +228,11 @@ static PyObject *get_values(PyObject *obj) { PRINTMARK(); if (values && !PyArray_CheckExact(values)) { + + if (PyObject_HasAttrString(values, "to_numpy")) { + values = PyObject_CallMethod(values, "to_numpy", NULL); + } + if (PyObject_HasAttrString(values, "values")) { PyObject *subvals = get_values(values); PyErr_Clear(); @@ -279,8 +284,8 @@ static PyObject *get_values(PyObject *obj) { repr = PyString_FromString("<unknown dtype>"); } - PyErr_Format(PyExc_ValueError, "%s or %s are not JSON serializable yet", - PyString_AS_STRING(repr), PyString_AS_STRING(typeRepr)); + PyErr_Format(PyExc_ValueError, "%R or %R are not JSON serializable yet", + repr, typeRepr); Py_DECREF(repr); Py_DECREF(typeRepr); diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index ab5621d857e89..89fc90589a6e4 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -414,6 +414,17 @@ def _formatter(self, boxed=False): # ---------------------------------------------------------------- # Array-Like / EA-Interface Methods + @property + def transpose(self): + # no-op because we are always 1-D + return self + + T = transpose + + def ravel(self, method=None): + # no-op because we are always 1-D + return self + @property def nbytes(self): return self._data.nbytes diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 3f32b7b7dcea9..8fefbefc09eef 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -19,7 +19,7 @@ is_extension_type, is_float_dtype, is_int64_dtype, is_object_dtype, is_period_dtype, is_string_dtype, is_timedelta64_dtype, pandas_dtype) from pandas.core.dtypes.dtypes import DatetimeTZDtype -from 
pandas.core.dtypes.generic import ABCIndexClass, ABCSeries +from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries, ABCPandasArray from pandas.core.dtypes.missing import isna from pandas.core import ops @@ -240,11 +240,14 @@ def _simple_new(cls, values, freq=None, tz=None): result._dtype = dtype return result - def __new__(cls, values, freq=None, tz=None, dtype=None, copy=False, - dayfirst=False, yearfirst=False, ambiguous='raise'): - return cls._from_sequence( + def __init__(self, values, freq=None, tz=None, dtype=None, copy=False, + dayfirst=False, yearfirst=False, ambiguous='raise'): + result = type(self)._from_sequence( values, freq=freq, tz=tz, dtype=dtype, copy=copy, dayfirst=dayfirst, yearfirst=yearfirst, ambiguous=ambiguous) + self._data = result._data + self._freq = result._freq + self._dtype = result._dtype @classmethod def _from_sequence(cls, data, dtype=None, copy=False, @@ -523,7 +526,9 @@ def _ndarray_values(self): @Appender(dtl.DatetimeLikeArrayMixin._validate_fill_value.__doc__) def _validate_fill_value(self, fill_value): - if isna(fill_value): + if isna(fill_value) or fill_value == iNaT: + # FIXME: shouldn't allow iNaT through here; see discussion + # in GH#24024 fill_value = iNaT elif isinstance(fill_value, (datetime, np.datetime64)): self._assert_tzawareness_compat(fill_value) @@ -1568,6 +1573,8 @@ def sequence_to_dt64ns(data, dtype=None, copy=False, copy = False elif isinstance(data, ABCSeries): data = data._values + elif isinstance(data, ABCPandasArray): + data = data.to_numpy() if hasattr(data, "freq"): # i.e. 
DatetimeArray/Index diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index e1141c6b6b3a8..293ce7d8e4aca 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -10,8 +10,7 @@ from pandas.core.dtypes.dtypes import ( CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype, ExtensionDtype, - IntervalDtype, PandasExtensionDtype, PeriodDtype, _pandas_registry, - registry) + IntervalDtype, PandasExtensionDtype, PeriodDtype, registry) from pandas.core.dtypes.generic import ( ABCCategorical, ABCCategoricalIndex, ABCDateOffset, ABCDatetimeIndex, ABCIndexClass, ABCPeriodArray, ABCPeriodIndex, ABCSeries, ABCSparseArray, @@ -1984,7 +1983,7 @@ def pandas_dtype(dtype): return dtype # registered extension types - result = _pandas_registry.find(dtype) or registry.find(dtype) + result = registry.find(dtype) if result is not None: return result diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 0501889d743d4..e6967ed2a4d3d 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -12,8 +12,8 @@ is_extension_array_dtype, is_interval_dtype, is_object_dtype, is_period_dtype, is_sparse, is_timedelta64_dtype) from pandas.core.dtypes.generic import ( - ABCDatetimeIndex, ABCPeriodIndex, ABCRangeIndex, ABCSparseDataFrame, - ABCTimedeltaIndex) + ABCDatetimeArray, ABCDatetimeIndex, ABCIndexClass, ABCPeriodIndex, + ABCRangeIndex, ABCSparseDataFrame, ABCTimedeltaIndex) from pandas import compat @@ -471,7 +471,15 @@ def _concat_datetimetz(to_concat, name=None): all inputs must be DatetimeIndex it is used in DatetimeIndex.append also """ - return to_concat[0]._concat_same_dtype(to_concat, name=name) + # Right now, internals will pass a List[DatetimeArray] here + # for reductions like quantile. I would like to disentangle + # all this before we get here. 
+ sample = to_concat[0] + + if isinstance(sample, ABCIndexClass): + return sample._concat_same_dtype(to_concat, name=name) + elif isinstance(sample, ABCDatetimeArray): + return sample._concat_same_type(to_concat) def _concat_index_same_dtype(indexes, klass=None): diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index e0d0cf3393dd5..b4a46ef049809 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -479,8 +479,8 @@ def _is_boolean(self): return is_bool_dtype(self.categories) -class DatetimeTZDtype(PandasExtensionDtype): - +@register_extension_dtype +class DatetimeTZDtype(PandasExtensionDtype, ExtensionDtype): """ A np.dtype duck-typed class, suitable for holding a custom datetime with tz dtype. @@ -493,6 +493,7 @@ class DatetimeTZDtype(PandasExtensionDtype): str = '|M8[ns]' num = 101 base = np.dtype('M8[ns]') + na_value = NaT _metadata = ('unit', 'tz') _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]") _cache = {} @@ -570,8 +571,8 @@ def construct_array_type(cls): ------- type """ - from pandas import DatetimeIndex - return DatetimeIndex + from pandas.core.arrays import DatetimeArrayMixin + return DatetimeArrayMixin @classmethod def construct_from_string(cls, string): @@ -885,10 +886,3 @@ def is_dtype(cls, dtype): else: return False return super(IntervalDtype, cls).is_dtype(dtype) - - -# TODO(Extension): remove the second registry once all internal extension -# dtypes are real extension dtypes. 
-_pandas_registry = Registry() - -_pandas_registry.register(DatetimeTZDtype) diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py index bbc447d6fa0da..5c659b35bd0cc 100644 --- a/pandas/core/dtypes/generic.py +++ b/pandas/core/dtypes/generic.py @@ -67,6 +67,8 @@ def _check(cls, inst): ("extension", "categorical", "periodarray", + "datetimearray", + "timedeltaarray", "npy_extension", )) ABCPandasArray = create_pandas_abc_type("ABCPandasArray", diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index c9ed2521676ad..fca789030f9a4 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -35,11 +35,11 @@ import pandas.core.algorithms as algos from pandas.core.arrays import ( - Categorical, DatetimeArrayMixin as DatetimeArray, ExtensionArray) + Categorical, DatetimeArrayMixin as DatetimeArray, ExtensionArray, + TimedeltaArrayMixin as TimedeltaArray) from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.indexes.datetimes import DatetimeIndex -from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core.indexing import check_setitem_lengths from pandas.core.internals.arrays import extract_array import pandas.core.missing as missing @@ -2169,7 +2169,7 @@ class DatetimeLikeBlockMixin(object): @property def _holder(self): - return DatetimeIndex + return DatetimeArray @property def _na_value(self): @@ -2179,15 +2179,33 @@ def _na_value(self): def fill_value(self): return tslibs.iNaT + def to_dense(self): + # TODO(DatetimeBlock): remove + return np.asarray(self.values) + def get_values(self, dtype=None): """ return object dtype as boxed values, such as Timestamps/Timedelta """ if is_object_dtype(dtype): - return lib.map_infer(self.values.ravel(), - self._box_func).reshape(self.values.shape) + values = self.values + + if self.ndim > 1: + values = values.ravel() + + values = lib.map_infer(values, self._box_func) + + if self.ndim > 1: + values = 
values.reshape(self.values.shape) + + return values + return self.values + @property + def asi8(self): + return self.values.view('i8') + class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): __slots__ = () @@ -2198,13 +2216,15 @@ class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): def __init__(self, values, placement, ndim=None): if values.dtype != _TD_DTYPE: values = conversion.ensure_timedelta64ns(values) - + if isinstance(values, TimedeltaArray): + values = values._data + assert isinstance(values, np.ndarray), type(values) super(TimeDeltaBlock, self).__init__(values, placement=placement, ndim=ndim) @property def _holder(self): - return TimedeltaIndex + return TimedeltaArray @property def _box_func(self): @@ -2299,6 +2319,9 @@ def to_native_types(self, slicer=None, na_rep=None, quoting=None, dtype=object) return rvalues + def external_values(self, dtype=None): + return np.asarray(self.values.astype("timedelta64[ns]", copy=False)) + class BoolBlock(NumericBlock): __slots__ = () @@ -2771,6 +2794,10 @@ def _maybe_coerce_values(self, values): """ if values.dtype != _NS_DTYPE: values = conversion.ensure_datetime64ns(values) + if isinstance(values, DatetimeArray): + values = values._data + + assert isinstance(values, np.ndarray), type(values) return values def _astype(self, dtype, **kwargs): @@ -2857,15 +2884,17 @@ def to_native_types(self, slicer=None, na_rep=None, date_format=None, """ convert to our native types format, slicing if desired """ values = self.values + i8values = self.asi8 + if slicer is not None: - values = values[..., slicer] + i8values = i8values[..., slicer] from pandas.io.formats.format import _get_format_datetime64_from_values format = _get_format_datetime64_from_values(values, date_format) result = tslib.format_array_from_datetime( - values.view('i8').ravel(), tz=getattr(self.values, 'tz', None), - format=format, na_rep=na_rep).reshape(values.shape) + i8values.ravel(), tz=getattr(self.values, 'tz', None), + format=format, 
na_rep=na_rep).reshape(i8values.shape) return np.atleast_2d(result) def should_store(self, value): @@ -2885,12 +2914,16 @@ def set(self, locs, values, check=False): self.values[locs] = values + def external_values(self): + return np.asarray(self.values.astype('datetime64[ns]', copy=False)) -class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock): + +class DatetimeTZBlock(ExtensionBlock, DatetimeBlock): """ implement a datetime64 block with a tz attribute """ __slots__ = () _concatenator = staticmethod(_concat._concat_datetime) is_datetimetz = True + is_extension = True def __init__(self, values, placement, ndim=2, dtype=None): # XXX: This will end up calling _maybe_coerce_values twice @@ -2905,6 +2938,10 @@ def __init__(self, values, placement, ndim=2, dtype=None): super(DatetimeTZBlock, self).__init__(values, placement=placement, ndim=ndim) + @property + def _holder(self): + return DatetimeArray + def _maybe_coerce_values(self, values, dtype=None): """Input validation for values passed to __init__. Ensure that we have datetime64TZ, coercing if necessary. @@ -2926,7 +2963,8 @@ def _maybe_coerce_values(self, values, dtype=None): if dtype is not None: if isinstance(dtype, compat.string_types): dtype = DatetimeTZDtype.construct_from_string(dtype) - values = values._shallow_copy(tz=dtype.tz) + values = type(values)(values, dtype=dtype) + # TODO: shouldn't this be done via tz_convert? 
if values.tz is None: raise ValueError("cannot create a DatetimeTZBlock without a tz") @@ -2936,8 +2974,8 @@ def _maybe_coerce_values(self, values, dtype=None): @property def is_view(self): """ return a boolean if I am possibly a view """ - # check the ndarray values of the DatetimeIndex values - return self.values.values.base is not None + # check the ndarray values of the DatetimeArray values + return self.values._data.base is not None def copy(self, deep=True): """ copy constructor """ @@ -2946,18 +2984,40 @@ def copy(self, deep=True): values = values.copy(deep=True) return self.make_block_same_class(values) - def external_values(self): - """ we internally represent the data as a DatetimeIndex, but for - external compat with ndarray, export as a ndarray of Timestamps + def get_values(self, dtype=None): """ - return self.values.astype('datetime64[ns]').values + Returns an ndarray of values. - def get_values(self, dtype=None): + Parameters + ---------- + dtype : np.dtype + Only `object`-like dtypes are respected here (not sure + why). + + Returns + ------- + values : ndarray + When ``dtype=object``, then and object-dtype ndarray of + boxed values is returned. Otherwise, an M8[ns] ndarray + is returned. + + DatetimeArray is always 1-d. ``get_values`` will reshape + the return value to be the same dimensionality as the + block. + """ # return object dtype as Timestamps with the zones + values = self.values if is_object_dtype(dtype): - return lib.map_infer( - self.values.ravel(), self._box_func).reshape(self.values.shape) - return self.values + values = values._box_values(values._data) + + values = np.asarray(values) + + if self.ndim == 2: + # Ensure that our shape is correct for DataFrame. + # ExtensionArrays are always 1-D, even in a DataFrame when + # the analogous NumPy-backed column would be a 2-D ndarray. 
+ values = values.reshape(1, -1) + return values def _slice(self, slicer): """ return a slice of my values """ @@ -2982,13 +3042,19 @@ def _try_coerce_args(self, values, other): base-type values, base-type other """ # asi8 is a view, needs copy - values = _block_shape(values.asi8, ndim=self.ndim) + values = _block_shape(values.view("i8"), ndim=self.ndim) if isinstance(other, ABCSeries): other = self._holder(other) if isinstance(other, bool): raise TypeError + elif is_datetime64_dtype(other): + # add the tz back + # FIXME: we shouldn't be ravelling at this point, but Otherwise + # this raises on tests.frame.test_quantile.test_quantile_box + other = self._holder(other.ravel(), dtype=self.dtype) + elif (is_null_datelike_scalar(other) or (lib.is_scalar(other) and isna(other))): other = tslibs.iNaT @@ -3023,7 +3089,8 @@ def _try_coerce_result(self, result): result = result.reshape(np.prod(result.shape)) # GH#24096 new values invalidates a frequency - result = self.values._shallow_copy(result, freq=None) + result = self._holder._simple_new(result, freq=None, + tz=self.values.tz) return result @@ -3031,32 +3098,6 @@ def _try_coerce_result(self, result): def _box_func(self): return lambda x: tslibs.Timestamp(x, tz=self.dtype.tz) - def shift(self, periods, axis=0, fill_value=None): - """ shift the block by periods """ - - # think about moving this to the DatetimeIndex. 
This is a non-freq - # (number of periods) shift ### - - N = len(self) - indexer = np.zeros(N, dtype=int) - if periods > 0: - indexer[periods:] = np.arange(N - periods) - else: - indexer[:periods] = np.arange(-periods, N) - - new_values = self.values.asi8.take(indexer) - - if isna(fill_value): - fill_value = tslibs.iNaT - if periods > 0: - new_values[:periods] = fill_value - else: - new_values[periods:] = fill_value - - new_values = self.values._shallow_copy(new_values) - return [self.make_block_same_class(new_values, - placement=self.mgr_locs)] - def diff(self, n, axis=0): """1st discrete difference @@ -3086,14 +3127,45 @@ def diff(self, n, axis=0): return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)] def concat_same_type(self, to_concat, placement=None): - """ - Concatenate list of single blocks of the same type. - """ - values = self._concatenator([blk.values for blk in to_concat], - axis=self.ndim - 1) - # not using self.make_block_same_class as values can be non-tz dtype - return make_block( - values, placement=placement or slice(0, len(values), 1)) + # need to handle concat([tz1, tz2]) here, since DatetimeArray + # only handles cases where all the tzs are the same. + # Instead of placing the condition here, it could also go into the + # is_uniform_join_units check, but I'm not sure what is better + + if len({x.dtype for x in to_concat}) > 1: + values = _concat._concat_datetime([x.values for x in to_concat]) + placement = placement or slice(0, len(values), 1) + + if self.ndim > 1: + values = np.atleast_2d(values) + return ObjectBlock(values, ndim=self.ndim, placement=placement) + return super(DatetimeTZBlock, self).concat_same_type(to_concat, + placement) + + def fillna(self, value, limit=None, inplace=False, downcast=None): + # We support filling a DatetimeTZ with a `value` whose timezone + # is different by coercing to object. 
+ try: + return super(DatetimeTZBlock, self).fillna( + value, limit, inplace, downcast + ) + except (ValueError, TypeError): + # different timezones, or a non-tz + return self.astype(object).fillna( + value, limit=limit, inplace=inplace, downcast=downcast + ) + + def setitem(self, indexer, value): + # https://github.com/pandas-dev/pandas/issues/24020 + # Need a dedicated setitem until #24020 (type promotion in setitem + # for extension arrays) is designed and implemented. + try: + return super(DatetimeTZBlock, self).setitem(indexer, value) + except (ValueError, TypeError): + newb = make_block(self.values.astype(object), + placement=self.mgr_locs, + klass=ObjectBlock,) + return newb.setitem(indexer, value) # ----------------------------------------------------------------- @@ -3115,8 +3187,16 @@ def get_block_type(values, dtype=None): dtype = dtype or values.dtype vtype = dtype.type - if is_categorical(values): + if is_sparse(dtype): + # Need this first(ish) so that Sparse[datetime] is sparse + cls = ExtensionBlock + elif is_categorical(values): cls = CategoricalBlock + elif issubclass(vtype, np.datetime64): + assert not is_datetime64tz_dtype(values) + cls = DatetimeBlock + elif is_datetime64tz_dtype(values): + cls = DatetimeTZBlock elif is_interval_dtype(dtype) or is_period_dtype(dtype): cls = ObjectValuesExtensionBlock elif is_extension_array_dtype(values): @@ -3128,11 +3208,6 @@ def get_block_type(values, dtype=None): cls = TimeDeltaBlock elif issubclass(vtype, np.complexfloating): cls = ComplexBlock - elif issubclass(vtype, np.datetime64): - assert not is_datetime64tz_dtype(values) - cls = DatetimeBlock - elif is_datetime64tz_dtype(values): - cls = DatetimeTZBlock elif issubclass(vtype, np.integer): cls = IntBlock elif dtype == np.bool_: diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index b18b966406bbb..b3c893c7d84be 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ 
-589,7 +589,7 @@ def sanitize_array(data, index, dtype=None, copy=False, # everything else in this block must also handle ndarray's, # becuase we've unwrapped PandasArray into an ndarray. - if dtype is not None and not data.dtype.is_dtype(dtype): + if dtype is not None: subarr = data.astype(dtype) if copy: diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 7cab52ddda87f..3c966b036aac8 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -1539,17 +1539,20 @@ def wrapper(left, right): raise TypeError("{typ} cannot perform the operation " "{op}".format(typ=type(left).__name__, op=str_rep)) - elif (is_extension_array_dtype(left) or - (is_extension_array_dtype(right) and not is_scalar(right))): - # GH#22378 disallow scalar to exclude e.g. "category", "Int64" - return dispatch_to_extension_op(op, left, right) - elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left): + # Give dispatch_to_index_op a chance for tests like + # test_dt64_series_add_intlike, which the index dispatching handles + # specifically. result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex) return construct_result(left, result, index=left.index, name=res_name, dtype=result.dtype) + elif (is_extension_array_dtype(left) or + (is_extension_array_dtype(right) and not is_scalar(right))): + # GH#22378 disallow scalar to exclude e.g. 
"category", "Int64" + return dispatch_to_extension_op(op, left, right) + elif is_timedelta64_dtype(left): result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex) return construct_result(left, result, diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 77dc04e9453a9..baffea3cd21d2 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -9,7 +9,7 @@ from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, PeriodDtype, - IntervalDtype, CategoricalDtype, registry, _pandas_registry) + IntervalDtype, CategoricalDtype, registry) from pandas.core.dtypes.common import ( is_categorical_dtype, is_categorical, is_datetime64tz_dtype, is_datetimetz, @@ -804,14 +804,6 @@ def test_registry(dtype): assert dtype in registry.dtypes -@pytest.mark.parametrize('dtype', [ - DatetimeTZDtype, -]) -def test_pandas_registry(dtype): - assert dtype not in registry.dtypes - assert dtype in _pandas_registry.dtypes - - @pytest.mark.parametrize('dtype, expected', [ ('int64', None), ('interval', IntervalDtype()), @@ -824,13 +816,6 @@ def test_registry_find(dtype, expected): assert registry.find(dtype) == expected -@pytest.mark.parametrize('dtype, expected', [ - ('datetime64[ns, US/Eastern]', DatetimeTZDtype('ns', 'US/Eastern')), -]) -def test_pandas_registry_find(dtype, expected): - assert _pandas_registry.find(dtype) == expected - - @pytest.mark.parametrize('dtype, expected', [ (str, False), (int, False), diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index d58b7ddf29123..bd50584406312 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -157,6 +157,12 @@ def astype(self, dtype, copy=True): # NumPy has issues when all the dicts are the same length. # np.array([UserDict(...), UserDict(...)]) fails, # but np.array([{...}, {...}]) works, so cast. 
+ + # needed to add this check for the Series constructor + if isinstance(dtype, type(self.dtype)) and dtype == self.dtype: + if copy: + return self.copy() + return self return np.array([dict(x) for x in self], dtype=dtype, copy=copy) def unique(self): diff --git a/pandas/tests/extension/test_common.py b/pandas/tests/extension/test_common.py index 2bc4bf5df2298..db3f3b80bca6b 100644 --- a/pandas/tests/extension/test_common.py +++ b/pandas/tests/extension/test_common.py @@ -77,14 +77,6 @@ def test_astype_no_copy(): assert arr is not result -@pytest.mark.parametrize('dtype', [ - dtypes.DatetimeTZDtype('ns', 'US/Central'), -]) -def test_is_not_extension_array_dtype(dtype): - assert not isinstance(dtype, dtypes.ExtensionDtype) - assert not is_extension_array_dtype(dtype) - - @pytest.mark.parametrize('dtype', [ dtypes.CategoricalDtype(), dtypes.IntervalDtype(), diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py new file mode 100644 index 0000000000000..7c4491d6edbcf --- /dev/null +++ b/pandas/tests/extension/test_datetime.py @@ -0,0 +1,237 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import DatetimeTZDtype + +import pandas as pd +from pandas.core.arrays import DatetimeArrayMixin as DatetimeArray +from pandas.tests.extension import base + + +@pytest.fixture(params=["US/Central"]) +def dtype(request): + return DatetimeTZDtype(unit="ns", tz=request.param) + + +@pytest.fixture +def data(dtype): + data = DatetimeArray(pd.date_range("2000", periods=100, tz=dtype.tz), + dtype=dtype) + return data + + +@pytest.fixture +def data_missing(dtype): + return DatetimeArray( + np.array(['NaT', '2000-01-01'], dtype='datetime64[ns]'), + dtype=dtype + ) + + +@pytest.fixture +def data_for_sorting(dtype): + a = pd.Timestamp('2000-01-01') + b = pd.Timestamp('2000-01-02') + c = pd.Timestamp('2000-01-03') + return DatetimeArray(np.array([b, c, a], dtype='datetime64[ns]'), + dtype=dtype) + + +@pytest.fixture +def 
data_missing_for_sorting(dtype): + a = pd.Timestamp('2000-01-01') + b = pd.Timestamp('2000-01-02') + return DatetimeArray(np.array([b, 'NaT', a], dtype='datetime64[ns]'), + dtype=dtype) + + +@pytest.fixture +def data_for_grouping(dtype): + """ + Expected to be like [B, B, NA, NA, A, A, B, C] + + Where A < B < C and NA is missing + """ + a = pd.Timestamp('2000-01-01') + b = pd.Timestamp('2000-01-02') + c = pd.Timestamp('2000-01-03') + na = 'NaT' + return DatetimeArray(np.array([b, b, na, na, a, a, b, c], + dtype='datetime64[ns]'), + dtype=dtype) + + +@pytest.fixture +def na_cmp(): + def cmp(a, b): + return a is pd.NaT and a is b + return cmp + + +@pytest.fixture +def na_value(): + return pd.NaT + + +# ---------------------------------------------------------------------------- +class BaseDatetimeTests(object): + pass + + +# ---------------------------------------------------------------------------- +# Tests +class TestDatetimeDtype(BaseDatetimeTests, base.BaseDtypeTests): + pass + + +class TestConstructors(BaseDatetimeTests, base.BaseConstructorsTests): + pass + + +class TestGetitem(BaseDatetimeTests, base.BaseGetitemTests): + pass + + +class TestMethods(BaseDatetimeTests, base.BaseMethodsTests): + @pytest.mark.skip(reason="Incorrect expected") + def test_value_counts(self, all_data, dropna): + pass + + def test_combine_add(self, data_repeated): + # Timestamp.__add__(Timestamp) not defined + pass + + +class TestInterface(BaseDatetimeTests, base.BaseInterfaceTests): + + def test_array_interface(self, data): + if data.tz: + # np.asarray(DTA) is currently always tz-naive. 
+ pytest.skip("GH-23569") + else: + super(TestInterface, self).test_array_interface(data) + + +class TestArithmeticOps(BaseDatetimeTests, base.BaseArithmeticOpsTests): + implements = {'__sub__', '__rsub__'} + + def test_arith_series_with_scalar(self, data, all_arithmetic_operators): + if all_arithmetic_operators in self.implements: + s = pd.Series(data) + self.check_opname(s, all_arithmetic_operators, s.iloc[0], + exc=None) + else: + # ... but not the rest. + super(TestArithmeticOps, self).test_arith_series_with_scalar( + data, all_arithmetic_operators + ) + + def test_add_series_with_extension_array(self, data): + # Datetime + Datetime not implemented + s = pd.Series(data) + msg = 'cannot add DatetimeArray(Mixin)? and DatetimeArray(Mixin)?' + with pytest.raises(TypeError, match=msg): + s + data + + def test_arith_series_with_array(self, data, all_arithmetic_operators): + if all_arithmetic_operators in self.implements: + s = pd.Series(data) + self.check_opname(s, all_arithmetic_operators, s.iloc[0], + exc=None) + else: + # ... but not the rest. + super(TestArithmeticOps, self).test_arith_series_with_scalar( + data, all_arithmetic_operators + ) + + def test_error(self, data, all_arithmetic_operators): + pass + + @pytest.mark.xfail(reason="different implementation", strict=False) + def test_direct_arith_with_series_returns_not_implemented(self, data): + # Right now, we have trouble with this. Returning NotImplemented + # fails other tests like + # tests/arithmetic/test_datetime64::TestTimestampSeriesArithmetic:: + # test_dt64_seris_add_intlike + return super( + TestArithmeticOps, + self + ).test_direct_arith_with_series_returns_not_implemented(data) + + +class TestCasting(BaseDatetimeTests, base.BaseCastingTests): + pass + + +class TestComparisonOps(BaseDatetimeTests, base.BaseComparisonOpsTests): + + def _compare_other(self, s, data, op_name, other): + # the base test is not appropriate for us. We raise on comparison + # with (some) integers, depending on the value. 
+ pass + + @pytest.mark.xfail(reason="different implementation", strict=False) + def test_direct_arith_with_series_returns_not_implemented(self, data): + return super( + TestComparisonOps, + self + ).test_direct_arith_with_series_returns_not_implemented(data) + + +class TestMissing(BaseDatetimeTests, base.BaseMissingTests): + pass + + +class TestReshaping(BaseDatetimeTests, base.BaseReshapingTests): + + @pytest.mark.skip(reason="We have DatetimeTZBlock") + def test_concat(self, data, in_frame): + pass + + def test_concat_mixed_dtypes(self, data): + # concat(Series[datetimetz], Series[category]) uses a + # plain np.array(values) on the DatetimeArray, which + # drops the tz. + super(TestReshaping, self).test_concat_mixed_dtypes(data) + + @pytest.mark.parametrize("obj", ["series", "frame"]) + def test_unstack(self, obj): + # GH-13287: can't use base test, since building the expected fails. + data = DatetimeArray._from_sequence(['2000', '2001', '2002', '2003'], + tz='US/Central') + index = pd.MultiIndex.from_product(([['A', 'B'], ['a', 'b']]), + names=['a', 'b']) + + if obj == "series": + ser = pd.Series(data, index=index) + expected = pd.DataFrame({ + "A": data.take([0, 1]), + "B": data.take([2, 3]) + }, index=pd.Index(['a', 'b'], name='b')) + expected.columns.name = 'a' + + else: + ser = pd.DataFrame({"A": data, "B": data}, index=index) + expected = pd.DataFrame( + {("A", "A"): data.take([0, 1]), + ("A", "B"): data.take([2, 3]), + ("B", "A"): data.take([0, 1]), + ("B", "B"): data.take([2, 3])}, + index=pd.Index(['a', 'b'], name='b') + ) + expected.columns.names = [None, 'a'] + + result = ser.unstack(0) + self.assert_equal(result, expected) + + +class TestSetitem(BaseDatetimeTests, base.BaseSetitemTests): + pass + + +class TestGroupby(BaseDatetimeTests, base.BaseGroupbyTests): + pass + + +class TestPrinting(BaseDatetimeTests, base.BasePrintingTests): + pass diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 
a21d0104b0d04..b877ed93f07a2 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -3244,8 +3244,8 @@ def test_setitem(self): # are copies) b1 = df._data.blocks[1] b2 = df._data.blocks[2] - assert b1.values.equals(b2.values) - assert id(b1.values.values.base) != id(b2.values.values.base) + tm.assert_extension_array_equal(b1.values, b2.values) + assert id(b1.values._data.base) != id(b2.values._data.base) # with nan df2 = df.copy() diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 26cd39c4b807c..f7eb7e8443f55 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -11,8 +11,12 @@ from distutils.version import LooseVersion import itertools from pandas import (Index, MultiIndex, DataFrame, DatetimeIndex, - Series, Categorical, TimedeltaIndex, SparseArray) + Series, Categorical, SparseArray) from pandas.compat import OrderedDict, lrange +from pandas.core.arrays import ( + DatetimeArrayMixin as DatetimeArray, + TimedeltaArrayMixin as TimedeltaArray, +) from pandas.core.internals import (SingleBlockManager, make_block, BlockManager) import pandas.core.algorithms as algos @@ -290,7 +294,7 @@ def test_make_block_same_class(self): block = create_block('M8[ns, US/Eastern]', [3]) with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): - block.make_block_same_class(block.values.values, + block.make_block_same_class(block.values, dtype=block.values.dtype) @@ -451,7 +455,7 @@ def test_copy(self, mgr): assert cp_blk.values.base is blk.values.base else: # DatetimeTZBlock has DatetimeIndex values - assert cp_blk.values.values.base is blk.values.values.base + assert cp_blk.values._data.base is blk.values._data.base cp = mgr.copy(deep=True) for blk, cp_blk in zip(mgr.blocks, cp.blocks): @@ -460,7 +464,7 @@ def test_copy(self, mgr): # some blocks it is an array (e.g. 
datetimetz), but was copied assert cp_blk.equals(blk) if not isinstance(cp_blk.values, np.ndarray): - assert cp_blk.values.values.base is not blk.values.values.base + assert cp_blk.values._data.base is not blk.values._data.base else: assert cp_blk.values.base is None and blk.values.base is None @@ -1258,9 +1262,9 @@ def test_binop_other(self, op, value, dtype): @pytest.mark.parametrize('typestr, holder', [ ('category', Categorical), - ('M8[ns]', DatetimeIndex), - ('M8[ns, US/Central]', DatetimeIndex), - ('m8[ns]', TimedeltaIndex), + ('M8[ns]', DatetimeArray), + ('M8[ns, US/Central]', DatetimeArray), + ('m8[ns]', TimedeltaArray), ('sparse', SparseArray), ]) def test_holder(typestr, holder): diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 0706cb12ac5d0..4bc8d41d11823 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1016,13 +1016,17 @@ def test_append_empty_frame_to_series_with_dateutil_tz(self): s = Series({'date': date, 'a': 1.0, 'b': 2.0}) df = DataFrame(columns=['c', 'd']) result = df.append(s, ignore_index=True) + # n.b. it's not clear to me that expected is correct here. + # It's possible that the `date` column should have + # datetime64[ns, tz] dtype for both result and expected. + # that would be more consistent with new columns having + # their own dtype (float for a and b, datetime64ns, tz for date). expected = DataFrame([[np.nan, np.nan, 1., 2., date]], - columns=['c', 'd', 'a', 'b', 'date']) + columns=['c', 'd', 'a', 'b', 'date'], + dtype=object) # These columns get cast to object after append - object_cols = ['c', 'd', 'date'] - expected.loc[:, object_cols] = expected.loc[:, object_cols].astype( - object - ) + expected['a'] = expected['a'].astype(float) + expected['b'] = expected['b'].astype(float) assert_frame_equal(result, expected)
cc @jreback @TomAugspurger changes _holder from DTI/TDI to DTA/TDA, pretty much all the diff from blocks.internals and core.dtypes Not yet green locally, but I'm presenting this as a viable alternative to the all-at-once still in #24024
https://api.github.com/repos/pandas-dev/pandas/pulls/24540
2019-01-01T23:21:06Z
2019-01-02T21:23:00Z
null
2019-01-04T16:02:24Z
Cython language level 3
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 71e25c3955a6d..c1fc0062dff09 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -57,10 +57,10 @@ cdef inline float64_t median_linear(float64_t* a, int n) nogil: n -= na_count if n % 2: - result = kth_smallest_c( a, n / 2, n) + result = kth_smallest_c( a, n // 2, n) else: - result = (kth_smallest_c(a, n / 2, n) + - kth_smallest_c(a, n / 2 - 1, n)) / 2 + result = (kth_smallest_c(a, n // 2, n) + + kth_smallest_c(a, n // 2 - 1, n)) / 2 if na_count: free(a) diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index f679746643643..36c4c752206a8 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -948,7 +948,7 @@ cdef class TextReader: status = tokenize_nrows(self.parser, nrows) if self.parser.warn_msg != NULL: - print >> sys.stderr, self.parser.warn_msg + print(self.parser.warn_msg, file=sys.stderr) free(self.parser.warn_msg) self.parser.warn_msg = NULL @@ -976,7 +976,7 @@ cdef class TextReader: status = tokenize_all_rows(self.parser) if self.parser.warn_msg != NULL: - print >> sys.stderr, self.parser.warn_msg + print(self.parser.warn_msg, file=sys.stderr) free(self.parser.warn_msg) self.parser.warn_msg = NULL diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 624872c1c56c6..44ea875f0b49d 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -275,7 +275,7 @@ def format_array_from_datetime(ndarray[int64_t] values, object tz=None, dts.sec) if show_ns: - ns = dts.ps / 1000 + ns = dts.ps // 1000 res += '.%.9d' % (ns + 1000 * dts.us) elif show_us: res += '.%.6d' % dts.us diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx index c48812acd3de1..9c88ca05ebcf0 100644 --- a/pandas/_libs/tslibs/ccalendar.pyx +++ b/pandas/_libs/tslibs/ccalendar.pyx @@ -159,7 +159,7 @@ cpdef int32_t get_week_of_year(int year, int month, int day) nogil: # estimate woy = (doy - 1) - dow + 3 if woy >= 0: - woy = woy / 7 + 
1 + woy = woy // 7 + 1 # verify if woy < 0: diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 1c0adaaa288a9..d8c3b91d1e460 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -462,8 +462,8 @@ cdef _TSObject convert_str_to_tsobject(object ts, object tz, object unit, dt = datetime(obj.dts.year, obj.dts.month, obj.dts.day, obj.dts.hour, obj.dts.min, obj.dts.sec, obj.dts.us, obj.tzinfo) - obj = convert_datetime_to_tsobject(dt, tz, - nanos=obj.dts.ps / 1000) + obj = convert_datetime_to_tsobject( + dt, tz, nanos=obj.dts.ps // 1000) return obj else: diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 240f008394099..dfd8c86c92c86 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -478,7 +478,7 @@ def get_date_field(int64_t[:] dtindex, object field): continue dt64_to_dtstruct(dtindex[i], &dts) - out[i] = dts.ps / 1000 + out[i] = dts.ps // 1000 return out elif field == 'doy': with nogil: @@ -522,7 +522,7 @@ def get_date_field(int64_t[:] dtindex, object field): dt64_to_dtstruct(dtindex[i], &dts) out[i] = dts.month - out[i] = ((out[i] - 1) / 3) + 1 + out[i] = ((out[i] - 1) // 3) + 1 return out elif field == 'dim': diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index e28462f7103b9..7e98fba48b51a 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -587,7 +587,7 @@ def shift_day(other: datetime, days: int) -> datetime: cdef inline int year_add_months(npy_datetimestruct dts, int months) nogil: """new year number after shifting npy_datetimestruct number of months""" - return dts.year + (dts.month + months - 1) / 12 + return dts.year + (dts.month + months - 1) // 12 cdef inline int month_add_months(npy_datetimestruct dts, int months) nogil: diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx index 87658ae92175e..f6866f797d576 100644 --- 
a/pandas/_libs/tslibs/strptime.pyx +++ b/pandas/_libs/tslibs/strptime.pyx @@ -240,7 +240,7 @@ def array_strptime(object[:] values, object fmt, s += "0" * (9 - len(s)) us = long(s) ns = us % 1000 - us = us / 1000 + us = us // 1000 elif parse_code == 11: weekday = locale_time.f_weekday.index(found_dict['A'].lower()) elif parse_code == 12: @@ -662,7 +662,7 @@ cdef parse_timezone_directive(object z): gmtoff_remainder_padding = "0" * pad_number microseconds = int(gmtoff_remainder + gmtoff_remainder_padding) - total_minutes = ((hours * 60) + minutes + (seconds / 60) + - (microseconds / 60000000)) + total_minutes = ((hours * 60) + minutes + (seconds // 60) + + (microseconds // 60000000)) total_minutes = -total_minutes if z.startswith("-") else total_minutes return pytz.FixedOffset(total_minutes) diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 5918c7963acf7..e1788db1cf8f8 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -587,7 +587,7 @@ def _binary_op_method_timedeltalike(op, name): # the PyDateTime_CheckExact case is for a datetime object that # is specifically *not* a Timestamp, as the Timestamp case will be # handled after `_validate_ops_compat` returns False below - from timestamps import Timestamp + from pandas._libs.tslibs.timestamps import Timestamp return op(self, Timestamp(other)) # We are implicitly requiring the canonical behavior to be # defined by Timestamp methods. 
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 8d825e0a6179e..c4d47a3c2384a 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -70,7 +70,7 @@ cdef inline object create_timestamp_from_ts(int64_t value, dts.sec, dts.us, tz) ts_base.value = value ts_base.freq = freq - ts_base.nanosecond = dts.ps / 1000 + ts_base.nanosecond = dts.ps // 1000 return ts_base diff --git a/pandas/_libs/writers.pyx b/pandas/_libs/writers.pyx index 6449a331689ad..8f035d0c205e3 100644 --- a/pandas/_libs/writers.pyx +++ b/pandas/_libs/writers.pyx @@ -16,7 +16,6 @@ from numpy cimport ndarray, uint8_t ctypedef fused pandas_string: str - unicode bytes diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx index 9b8fba16741f6..ed6a3efae137c 100644 --- a/pandas/io/sas/sas.pyx +++ b/pandas/io/sas/sas.pyx @@ -2,7 +2,7 @@ # cython: boundscheck=False, initializedcheck=False import numpy as np -import sas_constants as const +import pandas.io.sas.sas_constants as const ctypedef signed long long int64_t ctypedef unsigned char uint8_t diff --git a/setup.py b/setup.py index d58d444f9a481..09e1e226881fd 100755 --- a/setup.py +++ b/setup.py @@ -451,7 +451,7 @@ def run(self): # pinning `ext.cython_directives = directives` to each ext in extensions. # github.com/cython/cython/wiki/enhancements-compilerdirectives#in-setuppy directives = {'linetrace': False, - 'language_level': 2} + 'language_level': 3} macros = [] if linetrace: # https://pypkg.com/pypi/pytest-cython/f/tests/example-project/setup.py
- [X] closes #23927
https://api.github.com/repos/pandas-dev/pandas/pulls/24538
2019-01-01T20:25:59Z
2019-03-19T23:48:32Z
2019-03-19T23:48:32Z
2020-01-16T00:34:36Z
implement fillna from 24024, with fixes and tests
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 98a1f1b925447..ab5621d857e89 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -16,6 +16,7 @@ from pandas.errors import ( AbstractMethodError, NullFrequencyError, PerformanceWarning) from pandas.util._decorators import Appender, Substitution +from pandas.util._validators import validate_fillna_kwargs from pandas.core.dtypes.common import ( is_bool_dtype, is_categorical_dtype, is_datetime64_any_dtype, @@ -25,9 +26,10 @@ is_string_dtype, is_timedelta64_dtype, is_unsigned_integer_dtype, needs_i8_conversion, pandas_dtype) from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries +from pandas.core.dtypes.inference import is_array_like from pandas.core.dtypes.missing import isna -from pandas.core import nanops +from pandas.core import missing, nanops from pandas.core.algorithms import ( checked_add_with_arr, take, unique1d, value_counts) import pandas.core.common as com @@ -787,6 +789,52 @@ def _maybe_mask_results(self, result, fill_value=iNaT, convert=None): result[self._isnan] = fill_value return result + def fillna(self, value=None, method=None, limit=None): + # TODO(GH-20300): remove this + # Just overriding to ensure that we avoid an astype(object). + # Either 20300 or a `_values_for_fillna` would avoid this duplication. + if isinstance(value, ABCSeries): + value = value.array + + value, method = validate_fillna_kwargs(value, method) + + mask = self.isna() + + if is_array_like(value): + if len(value) != len(self): + raise ValueError("Length of 'value' does not match. Got ({}) " + " expected {}".format(len(value), len(self))) + value = value[mask] + + if mask.any(): + if method is not None: + if method == 'pad': + func = missing.pad_1d + else: + func = missing.backfill_1d + + values = self._data + if not is_period_dtype(self): + # For PeriodArray self._data is i8, which gets copied + # by `func`. 
Otherwise we need to make a copy manually + # to avoid modifying `self` in-place. + values = values.copy() + + new_values = func(values, limit=limit, + mask=mask) + if is_datetime64tz_dtype(self): + # we need to pass int64 values to the constructor to avoid + # re-localizing incorrectly + new_values = new_values.view("i8") + new_values = type(self)(new_values, dtype=self.dtype) + else: + # fill with value + new_values = self.copy() + new_values[mask] = value + else: + new_values = self.copy() + return new_values + # ------------------------------------------------------------------ # Frequency Properties/Methods diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 5a74f04c237d0..7199d88d4bde5 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -12,11 +12,10 @@ from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds import pandas.compat as compat from pandas.util._decorators import Appender, cache_readonly -from pandas.util._validators import validate_fillna_kwargs from pandas.core.dtypes.common import ( - _TD_DTYPE, ensure_object, is_array_like, is_datetime64_dtype, - is_float_dtype, is_list_like, is_period_dtype, pandas_dtype) + _TD_DTYPE, ensure_object, is_datetime64_dtype, is_float_dtype, + is_list_like, is_period_dtype, pandas_dtype) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.generic import ABCIndexClass, ABCPeriodIndex, ABCSeries from pandas.core.dtypes.missing import isna, notna @@ -24,7 +23,6 @@ import pandas.core.algorithms as algos from pandas.core.arrays import datetimelike as dtl import pandas.core.common as com -from pandas.core.missing import backfill_1d, pad_1d from pandas.tseries import frequencies from pandas.tseries.offsets import DateOffset, Tick, _delta_to_tick @@ -381,41 +379,6 @@ def _validate_fill_value(self, fill_value): "Got '{got}'.".format(got=fill_value)) return fill_value - def fillna(self, value=None, method=None, limit=None): - # 
TODO(#20300) - # To avoid converting to object, we re-implement here with the changes - # 1. Passing `_data` to func instead of self.astype(object) - # 2. Re-boxing output of 1. - # #20300 should let us do this kind of logic on ExtensionArray.fillna - # and we can use it. - - if isinstance(value, ABCSeries): - value = value._values - - value, method = validate_fillna_kwargs(value, method) - - mask = self.isna() - - if is_array_like(value): - if len(value) != len(self): - raise ValueError("Length of 'value' does not match. Got ({}) " - " expected {}".format(len(value), len(self))) - value = value[mask] - - if mask.any(): - if method is not None: - func = pad_1d if method == 'pad' else backfill_1d - new_values = func(self._data, limit=limit, - mask=mask) - new_values = type(self)(new_values, freq=self.freq) - else: - # fill with value - new_values = self.copy() - new_values[mask] = value - else: - new_values = self.copy() - return new_values - # -------------------------------------------------------------------- def _time_shift(self, periods, freq=None): diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 1012639fe0f9d..ee9aa9e229126 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -13,7 +13,7 @@ from pandas.core.dtypes.common import ( ensure_float64, is_datetime64_dtype, is_datetime64tz_dtype, is_float_dtype, is_integer, is_integer_dtype, is_numeric_v_string_like, is_scalar, - needs_i8_conversion) + is_timedelta64_dtype, needs_i8_conversion) from pandas.core.dtypes.missing import isna @@ -481,6 +481,10 @@ def pad_1d(values, limit=None, mask=None, dtype=None): _method = algos.pad_inplace_float64 elif values.dtype == np.object_: _method = algos.pad_inplace_object + elif is_timedelta64_dtype(values): + # NaTs are treated identically to datetime64, so we can dispatch + # to that implementation + _method = _pad_1d_datetime if _method is None: raise ValueError('Invalid dtype for pad_1d [{name}]' @@ -507,6 +511,10 @@ def 
backfill_1d(values, limit=None, mask=None, dtype=None): _method = algos.backfill_inplace_float64 elif values.dtype == np.object_: _method = algos.backfill_inplace_object + elif is_timedelta64_dtype(values): + # NaTs are treated identically to datetime64, so we can dispatch + # to that implementation + _method = _backfill_1d_datetime if _method is None: raise ValueError('Invalid dtype for backfill_1d [{name}]' diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 9ef331be32417..348ac4579ffb5 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -164,6 +164,20 @@ def test_reduce_invalid(self): with pytest.raises(TypeError, match='cannot perform'): arr._reduce("not a method") + @pytest.mark.parametrize('method', ['pad', 'backfill']) + def test_fillna_method_doesnt_change_orig(self, method): + data = np.arange(10, dtype='i8') * 24 * 3600 * 10**9 + arr = self.array_cls(data, freq='D') + arr[4] = pd.NaT + + fill_value = arr[3] if method == 'pad' else arr[5] + + result = arr.fillna(method=method) + assert result[4] == fill_value + + # check that the original was not changed + assert arr[4] is pd.NaT + def test_searchsorted(self): data = np.arange(10, dtype='i8') * 24 * 3600 * 10**9 arr = self.array_cls(data, freq='D') diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index 9f0954d328f89..8a833d8197381 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -138,6 +138,23 @@ def test_value_counts_preserves_tz(self): index=[pd.NaT, dti[0], dti[1]]) tm.assert_series_equal(result, expected) + @pytest.mark.parametrize('method', ['pad', 'backfill']) + def test_fillna_preserves_tz(self, method): + dti = pd.date_range('2000-01-01', periods=5, freq='D', tz='US/Central') + arr = DatetimeArray(dti, copy=True) + arr[2] = pd.NaT + + fill_val = dti[1] if method == 'pad' else dti[3] + expected = 
DatetimeArray([dti[0], dti[1], fill_val, dti[3], dti[4]], + freq=None, tz='US/Central') + + result = arr.fillna(method=method) + tm.assert_extension_array_equal(result, expected) + + # assert that arr and dti were not modified in-place + assert arr[2] is pd.NaT + assert dti[2] == pd.Timestamp('2000-01-03', tz='US/Central') + class TestSequenceToDT64NS(object):
cc @jreback @TomAugspurger couple of issues with `fillna` needed sorting out - The DTA version was operating in-place (fixed+tested) - The TDA version would raise because it wasn't supported in core.missing (fixed+tested) - The DTA[tz] version would incorrectly re-localize using the existing constructors, i.e. was dependent on the constructor changes in 24024. With the edits here it is correct regardless of whether the constructor is changed.
https://api.github.com/repos/pandas-dev/pandas/pulls/24536
2019-01-01T19:36:10Z
2019-01-01T22:52:51Z
2019-01-01T22:52:51Z
2019-01-01T23:14:41Z
Make DTI[tz]._values and Series[tz]._values return DTA
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index a90cfa4e4c906..0501889d743d4 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -426,8 +426,7 @@ def _concat_datetime(to_concat, axis=0, typs=None): if any(typ.startswith('datetime') for typ in typs): if 'datetime' in typs: - to_concat = [np.array(x, copy=False).view(np.int64) - for x in to_concat] + to_concat = [x.astype(np.int64, copy=False) for x in to_concat] return _concatenate_2d(to_concat, axis=axis).view(_NS_DTYPE) else: # when to_concat has different tz, len(typs) > 1. @@ -451,7 +450,7 @@ def _convert_datetimelike_to_object(x): # if dtype is of datetimetz or timezone if x.dtype.kind == _NS_DTYPE.kind: if getattr(x, 'tz', None) is not None: - x = x.astype(object).values + x = np.asarray(x.astype(object)) else: shape = x.shape x = tslib.ints_to_pydatetime(x.view(np.int64).ravel(), diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 5ed8bd45a6aff..5695d3d9e67f3 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -316,6 +316,12 @@ def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None): we require the we have a dtype compat for the values if we are passed a non-dtype compat, then coerce using the constructor """ + if isinstance(values, DatetimeArray): + values = DatetimeArray(values, freq=freq, tz=tz, dtype=dtype) + tz = values.tz + freq = values.freq + values = values._data + # DatetimeArray._simple_new will accept either i8 or M8[ns] dtypes assert isinstance(values, np.ndarray), type(values) @@ -340,7 +346,7 @@ def _values(self): # tz-naive -> ndarray # tz-aware -> DatetimeIndex if self.tz is not None: - return self + return self._eadata else: return self.values @@ -629,6 +635,9 @@ def intersection(self, other): not other.freq.isAnchored() or (not self.is_monotonic or not other.is_monotonic)): result = Index.intersection(self, other) + # Invalidate the freq of 
`result`, which may not be correct at + # this point, depending on the values. + result.freq = None result = self._shallow_copy(result._values, name=result.name, tz=result.tz, freq=None) if result.freq is None: diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 375b4ccbc122f..c9ed2521676ad 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -34,7 +34,8 @@ _isna_compat, array_equivalent, is_null_datelike_scalar, isna, notna) import pandas.core.algorithms as algos -from pandas.core.arrays import Categorical, ExtensionArray +from pandas.core.arrays import ( + Categorical, DatetimeArrayMixin as DatetimeArray, ExtensionArray) from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.indexes.datetimes import DatetimeIndex @@ -2437,8 +2438,14 @@ def _try_coerce_args(self, values, other): """ provide coercion to our input arguments """ if isinstance(other, ABCDatetimeIndex): - # to store DatetimeTZBlock as object - other = other.astype(object).values + # May get a DatetimeIndex here. Unbox it. 
+ other = other.array + + if isinstance(other, DatetimeArray): + # hit in pandas/tests/indexing/test_coercion.py + # ::TestWhereCoercion::test_where_series_datetime64[datetime64tz] + # when falling back to ObjectBlock.where + other = other.astype(object) return values, other @@ -2985,7 +2992,8 @@ def _try_coerce_args(self, values, other): elif (is_null_datelike_scalar(other) or (lib.is_scalar(other) and isna(other))): other = tslibs.iNaT - elif isinstance(other, self._holder): + elif isinstance(other, (self._holder, DatetimeArray)): + # TODO: DatetimeArray check will be redundant after GH#24024 if other.tz != self.values.tz: raise ValueError("incompatible or non tz-aware value") other = _block_shape(other.asi8, ndim=self.ndim) diff --git a/pandas/core/series.py b/pandas/core/series.py index 672fa2edb00ba..3637081e09f8c 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -477,7 +477,10 @@ def _values(self): """ Return the internal repr of this data. """ - return self._data.internal_values() + result = self._data.internal_values() + if isinstance(result, DatetimeIndex): + result = result._eadata + return result def _formatting_values(self): """ @@ -1602,10 +1605,6 @@ def unique(self): Categories (3, object): [a < b < c] """ result = super(Series, self).unique() - if isinstance(result, DatetimeIndex): - # TODO: This should be unnecessary after Series._values returns - # DatetimeArray - result = result._eadata return result def drop_duplicates(self, keep='first', inplace=False): diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 29b60d80750b2..280db3b2b3004 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -31,7 +31,7 @@ def has_test(combo): for combo in combos: if not has_test(combo): msg = 'test method is not defined: {0}, {1}' - raise AssertionError(msg.format(type(cls), combo)) + raise AssertionError(msg.format(cls.__name__, combo)) yield diff --git 
a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 50db4f67cc3cf..f941f2ff32fa1 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -17,12 +17,12 @@ PeriodIndex, Timedelta, IntervalIndex, Interval, CategoricalIndex, Timestamp, DataFrame, Panel) from pandas.core.arrays import ( + PandasArray, DatetimeArrayMixin as DatetimeArray, TimedeltaArrayMixin as TimedeltaArray, ) from pandas.compat import StringIO, PYPY, long from pandas.compat.numpy import np_array_datetime64_compat -from pandas.core.arrays import PandasArray from pandas.core.accessor import PandasDelegate from pandas.core.base import PandasObject, NoNewAttributesMixin from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin @@ -388,11 +388,9 @@ def test_value_counts_unique_nunique(self): for r in result: assert isinstance(r, Timestamp) - # TODO(#24024) once orig._values returns DTA, remove - # the `._eadata` below tm.assert_numpy_array_equal( result.astype(object), - orig._values._eadata.astype(object)) + orig._values.astype(object)) else: tm.assert_numpy_array_equal(result, orig.values) @@ -418,9 +416,7 @@ def test_value_counts_unique_nunique_null(self): else: o = o.copy() o[0:2] = iNaT - # TODO(#24024) once Series._values returns DTA, remove - # the `._eadata` here - values = o._values._eadata + values = o._values elif needs_i8_conversion(o): values[0:2] = iNaT @@ -1158,7 +1154,7 @@ def test_iter_box(self): (np.array(['a', 'b']), np.ndarray, 'object'), (pd.Categorical(['a', 'b']), pd.Categorical, 'category'), (pd.DatetimeIndex(['2017', '2018']), np.ndarray, 'datetime64[ns]'), - (pd.DatetimeIndex(['2017', '2018'], tz="US/Central"), pd.DatetimeIndex, + (pd.DatetimeIndex(['2017', '2018'], tz="US/Central"), DatetimeArray, 'datetime64[ns, US/Central]'), (pd.TimedeltaIndex([10**10]), np.ndarray, 'm8[ns]'), (pd.PeriodIndex([2018, 2019], freq='A'), pd.core.arrays.PeriodArray,
broken off of #24024, cc @jreback @TomAugspurger I think the edits in core.dtypes.concat are unrelated, but they are correct regardless and easy to trim a few more lines off the diff. The edit in tests.indexing.test_coercion was needed during troubleshooting, decided to keep it.
https://api.github.com/repos/pandas-dev/pandas/pulls/24534
2019-01-01T18:25:09Z
2019-01-01T20:07:38Z
2019-01-01T20:07:38Z
2019-01-01T21:35:03Z
CLN: Refactor some sorting code in Index set operations
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 07aec6a0d833f..1380c5caed1c9 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2302,27 +2302,15 @@ def union(self, other): allow_fill=False) result = _concat._concat_compat((lvals, other_diff)) - try: - lvals[0] < other_diff[0] - except TypeError as e: - warnings.warn("%s, sort order is undefined for " - "incomparable objects" % e, RuntimeWarning, - stacklevel=3) - else: - types = frozenset((self.inferred_type, - other.inferred_type)) - if not types & _unsortable_types: - result.sort() - else: result = lvals - try: - result = np.sort(result) - except TypeError as e: - warnings.warn("%s, sort order is undefined for " - "incomparable objects" % e, RuntimeWarning, - stacklevel=3) + try: + result = sorting.safe_sort(result) + except TypeError as e: + warnings.warn("%s, sort order is undefined for " + "incomparable objects" % e, RuntimeWarning, + stacklevel=3) # for subclasses return self._wrap_setop_result(other, result) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index b13110a04e1c1..2108206a8cbe5 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -805,8 +805,7 @@ def test_union_name_preservation(self, first_list, second_list, first_name, def test_union_dt_as_obj(self): # TODO: Replace with fixturesult - with tm.assert_produces_warning(RuntimeWarning): - firstCat = self.strIndex.union(self.dateIndex) + firstCat = self.strIndex.union(self.dateIndex) secondCat = self.strIndex.union(self.strIndex) if self.dateIndex.dtype == np.object_: @@ -1615,7 +1614,7 @@ def test_drop_tuple(self, values, to_drop): @pytest.mark.parametrize("method,expected", [ ('intersection', np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')], dtype=[('num', int), ('let', 'a1')])), - ('union', np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), + ('union', np.array([(1, 'A'), (1, 'B'), (1, 'C'), (2, 'A'), (2, 
'B'), (2, 'C')], dtype=[('num', int), ('let', 'a1')])) ]) def test_tuple_union_bug(self, method, expected): @@ -2242,10 +2241,7 @@ def test_copy_name(self): s1 = Series(2, index=first) s2 = Series(3, index=second[:-1]) - warning_type = RuntimeWarning if PY3 else None - with tm.assert_produces_warning(warning_type): - # Python 3: Unorderable types - s3 = s1 * s2 + s3 = s1 * s2 assert s3.index.name == 'mario' @@ -2274,16 +2270,9 @@ def test_union_base(self): first = index[3:] second = index[:5] - if PY3: - # unorderable types - warn_type = RuntimeWarning - else: - warn_type = None - - with tm.assert_produces_warning(warn_type): - result = first.union(second) + result = first.union(second) - expected = Index(['b', 2, 'c', 0, 'a', 1]) + expected = Index([0, 1, 2, 'a', 'b', 'c']) tm.assert_index_equal(result, expected) @pytest.mark.parametrize("klass", [ @@ -2294,14 +2283,7 @@ def test_union_different_type_base(self, klass): first = index[3:] second = index[:5] - if PY3: - # unorderable types - warn_type = RuntimeWarning - else: - warn_type = None - - with tm.assert_produces_warning(warn_type): - result = first.union(klass(second.values)) + result = first.union(klass(second.values)) assert tm.equalContents(result, index) diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index f6fb5f0c46cc8..4d3c9926fc5ae 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -120,24 +120,12 @@ def test_operators_bitwise(self): s_0123 & [0.1, 4, 3.14, 2] # s_0123 will be all false now because of reindexing like s_tft - if compat.PY3: - # unable to sort incompatible object via .union. 
- exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3]) - with tm.assert_produces_warning(RuntimeWarning): - assert_series_equal(s_tft & s_0123, exp) - else: - exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c']) - assert_series_equal(s_tft & s_0123, exp) + exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c']) + assert_series_equal(s_tft & s_0123, exp) # s_tft will be all false now because of reindexing like s_0123 - if compat.PY3: - # unable to sort incompatible object via .union. - exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a']) - with tm.assert_produces_warning(RuntimeWarning): - assert_series_equal(s_0123 & s_tft, exp) - else: - exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c']) - assert_series_equal(s_0123 & s_tft, exp) + exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c']) + assert_series_equal(s_0123 & s_tft, exp) assert_series_equal(s_0123 & False, Series([False] * 4)) assert_series_equal(s_0123 ^ False, Series([False, True, True, True])) @@ -280,11 +268,7 @@ def test_logical_ops_label_based(self): assert_series_equal(result, a[a]) for e in [Series(['z'])]: - if compat.PY3: - with tm.assert_produces_warning(RuntimeWarning): - result = a[a | e] - else: - result = a[a | e] + result = a[a | e] assert_series_equal(result, a[a]) # vs scalars
- [ ] Related to #24521 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` This is a pre-cursor to #24521 and cleans up some of the sorting code in set operations on ``Index``.
https://api.github.com/repos/pandas-dev/pandas/pulls/24533
2019-01-01T18:20:57Z
2019-01-01T20:07:23Z
2019-01-01T20:07:23Z
2019-01-01T20:07:26Z
Run isort at pandas/tests/io
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index e83220a476f9b..598453eb92d25 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -2,7 +2,8 @@ Module for applying conditional formatting to DataFrames and Series. """ -from collections import MutableMapping, defaultdict + +from collections import defaultdict from contextlib import contextmanager import copy from functools import partial @@ -18,7 +19,7 @@ from pandas.core.dtypes.generic import ABCSeries import pandas as pd -from pandas.api.types import is_list_like +from pandas.api.types import is_dict_like, is_list_like import pandas.core.common as com from pandas.core.config import get_option from pandas.core.generic import _shared_docs @@ -401,7 +402,7 @@ def format(self, formatter, subset=None): row_locs = self.data.index.get_indexer_for(sub_df.index) col_locs = self.data.columns.get_indexer_for(sub_df.columns) - if isinstance(formatter, MutableMapping): + if is_dict_like(formatter): for col, col_formatter in formatter.items(): # formatter must be callable, so '{}' are converted to lambdas col_formatter = _maybe_wrap_formatter(col_formatter) diff --git a/pandas/tests/io/formats/test_css.py b/pandas/tests/io/formats/test_css.py index e7adfe4883d98..f251bd983509e 100644 --- a/pandas/tests/io/formats/test_css.py +++ b/pandas/tests/io/formats/test_css.py @@ -1,6 +1,7 @@ import pytest from pandas.util import testing as tm + from pandas.io.formats.css import CSSResolver, CSSWarning diff --git a/pandas/tests/io/formats/test_eng_formatting.py b/pandas/tests/io/formats/test_eng_formatting.py index 9d5773283176c..455b6454d73ff 100644 --- a/pandas/tests/io/formats/test_eng_formatting.py +++ b/pandas/tests/io/formats/test_eng_formatting.py @@ -1,10 +1,13 @@ import numpy as np + +from pandas.compat import u + import pandas as pd from pandas import DataFrame -from pandas.compat import u -import pandas.io.formats.format as fmt from pandas.util import testing as tm +import 
pandas.io.formats.format as fmt + class TestEngFormatter(object): diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index b974415ffb029..c979894048127 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -5,35 +5,35 @@ """ from __future__ import print_function -import re -import pytz -import dateutil +from datetime import datetime import itertools from operator import methodcaller import os +import re import sys import warnings -from datetime import datetime +import dateutil +import numpy as np import pytest +import pytz -import numpy as np -import pandas as pd -from pandas import (DataFrame, Series, Index, Timestamp, MultiIndex, - date_range, NaT, read_csv) -from pandas.compat import (range, zip, lrange, StringIO, PY3, - u, lzip, is_platform_windows, - is_platform_32bit) import pandas.compat as compat +from pandas.compat import ( + PY3, StringIO, is_platform_32bit, is_platform_windows, lrange, lzip, range, + u, zip) + +import pandas as pd +from pandas import ( + DataFrame, Index, MultiIndex, NaT, Series, Timestamp, date_range, read_csv) +from pandas.core.config import ( + get_option, option_context, reset_option, set_option) +import pandas.util.testing as tm import pandas.io.formats.format as fmt import pandas.io.formats.printing as printing - -import pandas.util.testing as tm from pandas.io.formats.terminal import get_terminal_size -from pandas.core.config import (set_option, get_option, option_context, - reset_option) use_32bit_repr = is_platform_windows() or is_platform_32bit() diff --git a/pandas/tests/io/formats/test_printing.py b/pandas/tests/io/formats/test_printing.py index c9c46d4a991ec..67ff68ac4db8c 100644 --- a/pandas/tests/io/formats/test_printing.py +++ b/pandas/tests/io/formats/test_printing.py @@ -1,14 +1,14 @@ # -*- coding: utf-8 -*- +import numpy as np import pytest -import numpy as np import pandas as pd - from pandas import compat -import 
pandas.io.formats.printing as printing -import pandas.io.formats.format as fmt import pandas.core.config as cf +import pandas.io.formats.format as fmt +import pandas.io.formats.printing as printing + def test_adjoin(): data = [['a', 'b', 'c'], ['dd', 'ee', 'ff'], ['ggg', 'hhh', 'iii']] diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py index 3432d686a9fd6..407c786725f13 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -1,16 +1,18 @@ import copy -import textwrap import re +import textwrap -import pytest import numpy as np +import pytest + +import pandas.util._test_decorators as td + import pandas as pd from pandas import DataFrame import pandas.util.testing as tm -import pandas.util._test_decorators as td jinja2 = pytest.importorskip('jinja2') -from pandas.io.formats.style import Styler, _get_level_lengths # noqa +from pandas.io.formats.style import Styler, _get_level_lengths # noqa # isort:skip class TestStyler(object): diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py index 786c8fab08a01..1929817a49b3c 100644 --- a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -1,13 +1,12 @@ # -*- coding: utf-8 -*- +import os import sys +import numpy as np import pytest -import os -import numpy as np import pandas as pd - from pandas import DataFrame, compat from pandas.util import testing as tm diff --git a/pandas/tests/io/formats/test_to_excel.py b/pandas/tests/io/formats/test_to_excel.py index 7d54f93c9831e..13eb517fcab6a 100644 --- a/pandas/tests/io/formats/test_to_excel.py +++ b/pandas/tests/io/formats/test_to_excel.py @@ -4,10 +4,11 @@ """ import pytest + import pandas.util.testing as tm -from pandas.io.formats.excel import CSSToExcelConverter from pandas.io.formats.css import CSSWarning +from pandas.io.formats.excel import CSSToExcelConverter @pytest.mark.parametrize('css,expected', [ diff --git 
a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py index eb11433f46c0e..213eb0d5b5cb8 100644 --- a/pandas/tests/io/formats/test_to_html.py +++ b/pandas/tests/io/formats/test_to_html.py @@ -1,15 +1,18 @@ # -*- coding: utf-8 -*- -import re from datetime import datetime from io import open +import re -import pytest import numpy as np +import pytest + +from pandas.compat import StringIO, lrange, u + import pandas as pd -from pandas import compat, DataFrame, MultiIndex, option_context, Index -from pandas.compat import u, lrange, StringIO +from pandas import DataFrame, Index, MultiIndex, compat, option_context from pandas.util import testing as tm + import pandas.io.formats.format as fmt diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index f55fa289ea085..1653e474aa7b0 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -1,12 +1,13 @@ +import codecs from datetime import datetime import pytest +from pandas.compat import u + import pandas as pd -from pandas import DataFrame, compat, Series +from pandas import DataFrame, Series, compat from pandas.util import testing as tm -from pandas.compat import u -import codecs @pytest.fixture diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py index 4ebf435f7d75f..6774eac6d6c1a 100755 --- a/pandas/tests/io/generate_legacy_storage_files.py +++ b/pandas/tests/io/generate_legacy_storage_files.py @@ -35,28 +35,29 @@ """ from __future__ import print_function -from warnings import catch_warnings, filterwarnings + +from datetime import timedelta from distutils.version import LooseVersion -from pandas import (Series, DataFrame, Panel, - SparseSeries, SparseDataFrame, - Index, MultiIndex, bdate_range, to_msgpack, - date_range, period_range, timedelta_range, - Timestamp, NaT, Categorical, Period) -from pandas.tseries.offsets import ( - DateOffset, Hour, 
Minute, Day, - MonthBegin, MonthEnd, YearBegin, - YearEnd, Week, WeekOfMonth, LastWeekOfMonth, - BusinessDay, BusinessHour, CustomBusinessDay, FY5253, - Easter, - SemiMonthEnd, SemiMonthBegin, - QuarterBegin, QuarterEnd) -from pandas.compat import u import os +import platform as pl import sys +from warnings import catch_warnings, filterwarnings + import numpy as np + +from pandas.compat import u + import pandas -import platform as pl -from datetime import timedelta +from pandas import ( + Categorical, DataFrame, Index, MultiIndex, NaT, Panel, Period, Series, + SparseDataFrame, SparseSeries, Timestamp, bdate_range, date_range, + period_range, timedelta_range, to_msgpack) + +from pandas.tseries.offsets import ( + FY5253, BusinessDay, BusinessHour, CustomBusinessDay, DateOffset, Day, + Easter, Hour, LastWeekOfMonth, Minute, MonthBegin, MonthEnd, QuarterBegin, + QuarterEnd, SemiMonthBegin, SemiMonthEnd, Week, WeekOfMonth, YearBegin, + YearEnd) _loose_version = LooseVersion(pandas.__version__) diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py index 46a5e511fe748..430acbdac804a 100644 --- a/pandas/tests/io/json/test_compression.py +++ b/pandas/tests/io/json/test_compression.py @@ -1,8 +1,9 @@ import pytest +import pandas.util._test_decorators as td + import pandas as pd import pandas.util.testing as tm -import pandas.util._test_decorators as td from pandas.util.testing import assert_frame_equal diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index 4fda977706d8b..6fa3b5b3b2ed4 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -1,22 +1,21 @@ """Tests for Table Schema integration.""" -import json from collections import OrderedDict +import json import numpy as np -import pandas as pd import pytest -from pandas import DataFrame from pandas.core.dtypes.dtypes import ( - PeriodDtype, CategoricalDtype, 
DatetimeTZDtype) -from pandas.io.json.table_schema import ( - as_json_table_type, - build_table_schema, - convert_pandas_type_to_json_field, - convert_json_field_to_pandas_type, - set_default_names) + CategoricalDtype, DatetimeTZDtype, PeriodDtype) + +import pandas as pd +from pandas import DataFrame import pandas.util.testing as tm +from pandas.io.json.table_schema import ( + as_json_table_type, build_table_schema, convert_json_field_to_pandas_type, + convert_pandas_type_to_json_field, set_default_names) + class TestBuildSchema(object): diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index 3881b315bbed9..fd0953a4834ca 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ -1,9 +1,10 @@ -import pytest -import numpy as np import json +import numpy as np +import pytest + +from pandas import DataFrame, Index, compat import pandas.util.testing as tm -from pandas import compat, Index, DataFrame from pandas.io.json import json_normalize from pandas.io.json.normalize import nested_to_record diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 3fdf303ea2e8e..5468413033002 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1,22 +1,24 @@ # -*- coding: utf-8 -*- # pylint: disable-msg=W0612,E1101 -import pytest -from pandas.compat import (range, lrange, StringIO, - OrderedDict, is_platform_32bit) -import os -import numpy as np -from pandas import (Series, DataFrame, DatetimeIndex, Timestamp, - read_json, compat) from datetime import timedelta -import pandas as pd import json +import os -from pandas.util.testing import (assert_almost_equal, assert_frame_equal, - assert_series_equal, network, - ensure_clean, assert_index_equal) -import pandas.util.testing as tm +import numpy as np +import pytest + +from pandas.compat import ( + OrderedDict, StringIO, is_platform_32bit, lrange, range) import 
pandas.util._test_decorators as td +import pandas as pd +from pandas import ( + DataFrame, DatetimeIndex, Series, Timestamp, compat, read_json) +import pandas.util.testing as tm +from pandas.util.testing import ( + assert_almost_equal, assert_frame_equal, assert_index_equal, + assert_series_equal, ensure_clean, network) + _seriesd = tm.getSeriesData() _tsd = tm.getTimeSeriesData() diff --git a/pandas/tests/io/json/test_readlines.py b/pandas/tests/io/json/test_readlines.py index 25750f4fd23b5..25e78526b2e5a 100644 --- a/pandas/tests/io/json/test_readlines.py +++ b/pandas/tests/io/json/test_readlines.py @@ -1,12 +1,15 @@ # -*- coding: utf-8 -*- import pytest + +from pandas.compat import StringIO + import pandas as pd from pandas import DataFrame, read_json -from pandas.compat import StringIO -from pandas.io.json.json import JsonReader import pandas.util.testing as tm -from pandas.util.testing import (assert_frame_equal, assert_series_equal, - ensure_clean) +from pandas.util.testing import ( + assert_frame_equal, assert_series_equal, ensure_clean) + +from pandas.io.json.json import JsonReader @pytest.fixture diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index 4ad4f71791079..7f5241def597f 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -4,27 +4,28 @@ import json except ImportError: import simplejson as json -import math -import pytz -import locale -import pytest -import time -import datetime import calendar -import re +import datetime import decimal -import dateutil from functools import partial -from pandas.compat import range, StringIO, u -from pandas._libs.tslib import Timestamp +import locale +import math +import re +import time + +import dateutil +import numpy as np +import pytest +import pytz + import pandas._libs.json as ujson +from pandas._libs.tslib import Timestamp import pandas.compat as compat +from pandas.compat import StringIO, range, u -import numpy as np -from pandas 
import DataFrame, Series, Index, NaT, DatetimeIndex, date_range +from pandas import DataFrame, DatetimeIndex, Index, NaT, Series, date_range import pandas.util.testing as tm - json_unicode = (json.dumps if compat.PY3 else partial(json.dumps, encoding="utf-8")) diff --git a/pandas/tests/io/msgpack/common.py b/pandas/tests/io/msgpack/common.py index b770d12cffbfa..434d347c5742a 100644 --- a/pandas/tests/io/msgpack/common.py +++ b/pandas/tests/io/msgpack/common.py @@ -1,6 +1,5 @@ from pandas.compat import PY3 - # array compat if PY3: frombytes = lambda obj, data: obj.frombytes(data) diff --git a/pandas/tests/io/msgpack/test_buffer.py b/pandas/tests/io/msgpack/test_buffer.py index 8ebec734f1d3d..e36dc5bbdb4ba 100644 --- a/pandas/tests/io/msgpack/test_buffer.py +++ b/pandas/tests/io/msgpack/test_buffer.py @@ -1,6 +1,7 @@ # coding: utf-8 from pandas.io.msgpack import packb, unpackb + from .common import frombytes diff --git a/pandas/tests/io/msgpack/test_except.py b/pandas/tests/io/msgpack/test_except.py index 8e8d43a16eee9..d670e846c382a 100644 --- a/pandas/tests/io/msgpack/test_except.py +++ b/pandas/tests/io/msgpack/test_except.py @@ -1,10 +1,11 @@ # coding: utf-8 from datetime import datetime -from pandas.io.msgpack import packb, unpackb import pytest +from pandas.io.msgpack import packb, unpackb + class DummyException(Exception): pass diff --git a/pandas/tests/io/msgpack/test_extension.py b/pandas/tests/io/msgpack/test_extension.py index 2ee72c8a55cb4..06a0691bf4f7e 100644 --- a/pandas/tests/io/msgpack/test_extension.py +++ b/pandas/tests/io/msgpack/test_extension.py @@ -1,8 +1,10 @@ from __future__ import print_function + import array import pandas.io.msgpack as msgpack from pandas.io.msgpack import ExtType + from .common import frombytes, tobytes diff --git a/pandas/tests/io/msgpack/test_limits.py b/pandas/tests/io/msgpack/test_limits.py index 2d759d6117f2a..cad51da483c71 100644 --- a/pandas/tests/io/msgpack/test_limits.py +++ 
b/pandas/tests/io/msgpack/test_limits.py @@ -1,10 +1,11 @@ # coding: utf-8 -from __future__ import (absolute_import, division, print_function, - unicode_literals) -from pandas.io.msgpack import packb, unpackb, Packer, Unpacker, ExtType +from __future__ import ( + absolute_import, division, print_function, unicode_literals) import pytest +from pandas.io.msgpack import ExtType, Packer, Unpacker, packb, unpackb + class TestLimits(object): diff --git a/pandas/tests/io/msgpack/test_newspec.py b/pandas/tests/io/msgpack/test_newspec.py index 783bfc1b364f8..d92c649c5e1ca 100644 --- a/pandas/tests/io/msgpack/test_newspec.py +++ b/pandas/tests/io/msgpack/test_newspec.py @@ -1,6 +1,6 @@ # coding: utf-8 -from pandas.io.msgpack import packb, unpackb, ExtType +from pandas.io.msgpack import ExtType, packb, unpackb def test_str8(): diff --git a/pandas/tests/io/msgpack/test_pack.py b/pandas/tests/io/msgpack/test_pack.py index 3afd1fc086b33..f69ac0a0bc4ce 100644 --- a/pandas/tests/io/msgpack/test_pack.py +++ b/pandas/tests/io/msgpack/test_pack.py @@ -1,12 +1,14 @@ # coding: utf-8 +import struct + import pytest -import struct +from pandas.compat import OrderedDict, u from pandas import compat -from pandas.compat import u, OrderedDict -from pandas.io.msgpack import packb, unpackb, Unpacker, Packer + +from pandas.io.msgpack import Packer, Unpacker, packb, unpackb class TestPack(object): diff --git a/pandas/tests/io/msgpack/test_read_size.py b/pandas/tests/io/msgpack/test_read_size.py index ef521fa345637..42791b571e8e7 100644 --- a/pandas/tests/io/msgpack/test_read_size.py +++ b/pandas/tests/io/msgpack/test_read_size.py @@ -1,5 +1,6 @@ """Test Unpacker's read_array_header and read_map_header methods""" -from pandas.io.msgpack import packb, Unpacker, OutOfData +from pandas.io.msgpack import OutOfData, Unpacker, packb + UnexpectedTypeException = ValueError diff --git a/pandas/tests/io/msgpack/test_seq.py b/pandas/tests/io/msgpack/test_seq.py index 06e9872a22777..68be8c2d975aa 100644 --- 
a/pandas/tests/io/msgpack/test_seq.py +++ b/pandas/tests/io/msgpack/test_seq.py @@ -1,6 +1,7 @@ # coding: utf-8 import io + import pandas.io.msgpack as msgpack binarydata = bytes(bytearray(range(256))) diff --git a/pandas/tests/io/msgpack/test_sequnpack.py b/pandas/tests/io/msgpack/test_sequnpack.py index be0a23f60f18a..48f9817142762 100644 --- a/pandas/tests/io/msgpack/test_sequnpack.py +++ b/pandas/tests/io/msgpack/test_sequnpack.py @@ -1,10 +1,10 @@ # coding: utf-8 +import pytest + from pandas import compat -from pandas.io.msgpack import Unpacker, BufferFull -from pandas.io.msgpack import OutOfData -import pytest +from pandas.io.msgpack import BufferFull, OutOfData, Unpacker class TestPack(object): diff --git a/pandas/tests/io/msgpack/test_subtype.py b/pandas/tests/io/msgpack/test_subtype.py index e27ec66c63e1f..8af7e0b91d9b7 100644 --- a/pandas/tests/io/msgpack/test_subtype.py +++ b/pandas/tests/io/msgpack/test_subtype.py @@ -1,8 +1,9 @@ # coding: utf-8 -from pandas.io.msgpack import packb from collections import namedtuple +from pandas.io.msgpack import packb + class MyList(list): pass diff --git a/pandas/tests/io/msgpack/test_unpack.py b/pandas/tests/io/msgpack/test_unpack.py index c056f8d800e11..e63631a97bbb4 100644 --- a/pandas/tests/io/msgpack/test_unpack.py +++ b/pandas/tests/io/msgpack/test_unpack.py @@ -1,8 +1,10 @@ from io import BytesIO import sys -from pandas.io.msgpack import Unpacker, packb, OutOfData, ExtType + import pytest +from pandas.io.msgpack import ExtType, OutOfData, Unpacker, packb + class TestUnpack(object): diff --git a/pandas/tests/io/msgpack/test_unpack_raw.py b/pandas/tests/io/msgpack/test_unpack_raw.py index a261bf4cbbcd7..09ebb681d8709 100644 --- a/pandas/tests/io/msgpack/test_unpack_raw.py +++ b/pandas/tests/io/msgpack/test_unpack_raw.py @@ -1,6 +1,7 @@ """Tests for cases where the user seeks to obtain packed msgpack objects""" import io + from pandas.io.msgpack import Unpacker, packb diff --git a/pandas/tests/io/sas/test_sas.py 
b/pandas/tests/io/sas/test_sas.py index 016dc56b4d800..0f6342aa62ac0 100644 --- a/pandas/tests/io/sas/test_sas.py +++ b/pandas/tests/io/sas/test_sas.py @@ -1,6 +1,7 @@ import pytest from pandas.compat import StringIO + from pandas import read_sas diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py index 705387188438f..3dd8d0449ef5f 100644 --- a/pandas/tests/io/sas/test_sas7bdat.py +++ b/pandas/tests/io/sas/test_sas7bdat.py @@ -1,13 +1,16 @@ -import pandas as pd -from pandas.compat import PY2 -import pandas.util.testing as tm -import pandas.util._test_decorators as td -from pandas.errors import EmptyDataError -import os import io +import os + import numpy as np import pytest +from pandas.compat import PY2 +from pandas.errors import EmptyDataError +import pandas.util._test_decorators as td + +import pandas as pd +import pandas.util.testing as tm + # https://github.com/cython/cython/issues/1720 @pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning") diff --git a/pandas/tests/io/sas/test_xport.py b/pandas/tests/io/sas/test_xport.py index 6e5b2ab067aa5..1b086daf51c41 100644 --- a/pandas/tests/io/sas/test_xport.py +++ b/pandas/tests/io/sas/test_xport.py @@ -1,9 +1,12 @@ +import os + +import numpy as np import pytest + import pandas as pd import pandas.util.testing as tm + from pandas.io.sas.sasreader import read_sas -import numpy as np -import os # CSV versions of test xpt files were obtained using the R foreign library diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py index 99bece0efc8c8..8eb26d9f3dec5 100644 --- a/pandas/tests/io/test_clipboard.py +++ b/pandas/tests/io/test_clipboard.py @@ -1,19 +1,18 @@ # -*- coding: utf-8 -*- -import numpy as np -from numpy.random import randint from textwrap import dedent +import numpy as np +from numpy.random import randint import pytest -import pandas as pd -from pandas import DataFrame -from pandas import read_clipboard -from pandas import 
get_option from pandas.compat import PY2 + +import pandas as pd +from pandas import DataFrame, get_option, read_clipboard from pandas.util import testing as tm from pandas.util.testing import makeCustomDataframe as mkdf -from pandas.io.clipboard.exceptions import PyperclipException +from pandas.io.clipboard.exceptions import PyperclipException try: DataFrame({'A': [1, 2]}).to_clipboard() diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index 2f2b792588a92..a4c76285c95aa 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -6,15 +6,13 @@ import pytest -import pandas as pd -import pandas.io.common as icom +from pandas.compat import FileNotFoundError, StringIO, is_platform_windows import pandas.util._test_decorators as td + +import pandas as pd import pandas.util.testing as tm -from pandas.compat import ( - is_platform_windows, - StringIO, - FileNotFoundError, -) + +import pandas.io.common as icom class CustomFSPath(object): diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py index b62a1e6c4933e..a3fb35f9f01f2 100644 --- a/pandas/tests/io/test_compression.py +++ b/pandas/tests/io/test_compression.py @@ -1,13 +1,14 @@ +import contextlib import os import warnings -import contextlib import pytest import pandas as pd -import pandas.io.common as icom import pandas.util.testing as tm +import pandas.io.common as icom + @contextlib.contextmanager def catch_to_csv_depr(): diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py index 44d642399ced9..d170e4c43feb3 100644 --- a/pandas/tests/io/test_feather.py +++ b/pandas/tests/io/test_feather.py @@ -2,15 +2,16 @@ from distutils.version import LooseVersion import numpy as np +import pytest import pandas as pd import pandas.util.testing as tm from pandas.util.testing import assert_frame_equal, ensure_clean -import pytest +from pandas.io.feather_format import read_feather, to_feather # noqa:E402 + pyarrow = 
pytest.importorskip('pyarrow') -from pandas.io.feather_format import to_feather, read_feather # noqa:E402 pyarrow_version = LooseVersion(pyarrow.__version__) diff --git a/pandas/tests/io/test_gbq.py b/pandas/tests/io/test_gbq.py index 6dd16107bc7d7..15f366e5e2e9e 100644 --- a/pandas/tests/io/test_gbq.py +++ b/pandas/tests/io/test_gbq.py @@ -1,20 +1,22 @@ -import pytest from datetime import datetime -import pytz -import platform import os +import platform + +import numpy as np +import pytest +import pytz + +from pandas.compat import range + +import pandas as pd +from pandas import DataFrame, compat +import pandas.util.testing as tm try: from unittest import mock except ImportError: mock = pytest.importorskip("mock") -import numpy as np -import pandas as pd -from pandas import compat, DataFrame -from pandas.compat import range -import pandas.util.testing as tm - api_exceptions = pytest.importorskip("google.api_core.exceptions") bigquery = pytest.importorskip("google.cloud.bigquery") diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py index efbd57dec9f1b..12b082c3d4099 100644 --- a/pandas/tests/io/test_gcs.py +++ b/pandas/tests/io/test_gcs.py @@ -1,12 +1,14 @@ import numpy as np import pytest -from pandas import DataFrame, date_range, read_csv from pandas.compat import StringIO -from pandas.io.common import is_gcs_url + +from pandas import DataFrame, date_range, read_csv from pandas.util import _test_decorators as td from pandas.util.testing import assert_frame_equal +from pandas.io.common import is_gcs_url + def test_is_gcs_url(): assert is_gcs_url("gcs://pandas/somethingelse.com") diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index 492089644fb15..b2b0c21c81263 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -1,29 +1,28 @@ from __future__ import print_function +from functools import partial import os import re import threading -from functools import partial - -import pytest - import numpy 
as np from numpy.random import rand +import pytest -from pandas import (DataFrame, MultiIndex, read_csv, Timestamp, Index, - date_range, Series) -from pandas.compat import (map, zip, StringIO, BytesIO, - is_platform_windows, PY3, reload) +from pandas.compat import ( + PY3, BytesIO, StringIO, is_platform_windows, map, reload, zip) from pandas.errors import ParserError -from pandas.io.common import URLError, file_path_to_url -import pandas.io.html -from pandas.io.html import read_html +import pandas.util._test_decorators as td +from pandas import ( + DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv) import pandas.util.testing as tm -import pandas.util._test_decorators as td from pandas.util.testing import makeCustomDataframe as mkdf, network +from pandas.io.common import URLError, file_path_to_url +import pandas.io.html +from pandas.io.html import read_html + HERE = os.path.dirname(__file__) diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index e4f10de7f5b2b..9034b964033ed 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -1,18 +1,20 @@ """ test parquet compat """ -import os - -import pytest import datetime from distutils.version import LooseVersion +import os from warnings import catch_warnings import numpy as np -import pandas as pd +import pytest + from pandas.compat import PY3 -from pandas.io.parquet import (to_parquet, read_parquet, get_engine, - PyArrowImpl, FastParquetImpl) + +import pandas as pd from pandas.util import testing as tm +from pandas.io.parquet import ( + FastParquetImpl, PyArrowImpl, get_engine, read_parquet, to_parquet) + try: import pyarrow # noqa _HAVE_PYARROW = True diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index 85d467650d5c4..7f3fe1aa401ea 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -12,20 +12,22 @@ 3. Move the created pickle to "data/legacy_pickle/<version>" directory. 
""" +from distutils.version import LooseVersion import glob -import pytest +import os +import shutil from warnings import catch_warnings, simplefilter -import os -from distutils.version import LooseVersion +import pytest + +from pandas.compat import PY3, is_platform_little_endian +import pandas.util._test_decorators as td + import pandas as pd from pandas import Index -from pandas.compat import is_platform_little_endian, PY3 -import pandas import pandas.util.testing as tm -import pandas.util._test_decorators as td + from pandas.tseries.offsets import Day, MonthEnd -import shutil @pytest.fixture(scope='module') @@ -63,7 +65,7 @@ def compare(data, vf, version): # py3 compat when reading py2 pickle try: - data = pandas.read_pickle(vf) + data = pd.read_pickle(vf) except (ValueError) as e: if 'unsupported pickle protocol:' in str(e): # trying to read a py3 pickle in py2 @@ -111,13 +113,13 @@ def compare_series_ts(result, expected, typ, version): freq = result.index.freq assert freq + Day(1) == Day(2) - res = freq + pandas.Timedelta(hours=1) - assert isinstance(res, pandas.Timedelta) - assert res == pandas.Timedelta(days=1, hours=1) + res = freq + pd.Timedelta(hours=1) + assert isinstance(res, pd.Timedelta) + assert res == pd.Timedelta(days=1, hours=1) - res = freq + pandas.Timedelta(nanoseconds=1) - assert isinstance(res, pandas.Timedelta) - assert res == pandas.Timedelta(days=1, nanoseconds=1) + res = freq + pd.Timedelta(nanoseconds=1) + assert isinstance(res, pd.Timedelta) + assert res == pd.Timedelta(days=1, nanoseconds=1) def compare_series_dt_tz(result, expected, typ, version): @@ -337,7 +339,7 @@ def compress_file(self, src_path, dest_path, compression): compression=zipfile.ZIP_DEFLATED) as f: f.write(src_path, os.path.basename(src_path)) elif compression == 'xz': - lzma = pandas.compat.import_lzma() + lzma = pd.compat.import_lzma() f = lzma.LZMAFile(dest_path, "w") else: msg = 'Unrecognized compression type: {}'.format(compression) diff --git 
a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index 4179e81d02042..55b738a56f809 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -1,39 +1,37 @@ -import pytest -import os -import tempfile from contextlib import contextmanager -from warnings import catch_warnings, simplefilter -from distutils.version import LooseVersion - import datetime from datetime import timedelta +from distutils.version import LooseVersion +import os +import tempfile +from warnings import catch_warnings, simplefilter import numpy as np +import pytest -import pandas as pd -from pandas import (Series, DataFrame, Panel, MultiIndex, Int64Index, - RangeIndex, Categorical, bdate_range, - date_range, timedelta_range, Index, DatetimeIndex, - isna, compat, concat, Timestamp) +from pandas.compat import ( + PY35, PY36, BytesIO, is_platform_little_endian, is_platform_windows, + lrange, range, text_type, u) +import pandas.util._test_decorators as td +from pandas.core.dtypes.common import is_categorical_dtype + +import pandas as pd +from pandas import ( + Categorical, DataFrame, DatetimeIndex, Index, Int64Index, MultiIndex, + Panel, RangeIndex, Series, Timestamp, bdate_range, compat, concat, + date_range, isna, timedelta_range) import pandas.util.testing as tm -import pandas.util._test_decorators as td -from pandas.util.testing import (assert_panel_equal, - assert_frame_equal, - assert_series_equal, - set_timezone) - -from pandas.compat import (is_platform_windows, is_platform_little_endian, - PY35, PY36, BytesIO, text_type, - range, lrange, u) +from pandas.util.testing import ( + assert_frame_equal, assert_panel_equal, assert_series_equal, set_timezone) + +from pandas.io import pytables as pytables # noqa:E402 from pandas.io.formats.printing import pprint_thing -from pandas.core.dtypes.common import is_categorical_dtype +from pandas.io.pytables import ( + ClosedFileError, HDFStore, PossibleDataLossError, Term, read_hdf) +from pandas.io.pytables 
import TableIterator # noqa:E402 tables = pytest.importorskip('tables') -from pandas.io import pytables as pytables # noqa:E402 -from pandas.io.pytables import (TableIterator, # noqa:E402 - HDFStore, Term, read_hdf, - PossibleDataLossError, ClosedFileError) _default_compressor = ('blosc' if LooseVersion(tables.__version__) >= diff --git a/pandas/tests/io/test_s3.py b/pandas/tests/io/test_s3.py index a2c3d17f8754a..32eae8ed328f4 100644 --- a/pandas/tests/io/test_s3.py +++ b/pandas/tests/io/test_s3.py @@ -1,7 +1,9 @@ import pytest -from pandas import read_csv from pandas.compat import BytesIO + +from pandas import read_csv + from pandas.io.common import is_s3_url diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 1ad5d636ccf23..75a6d8d009083 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -18,27 +18,29 @@ """ from __future__ import print_function -import pytest -import sqlite3 -import csv +import csv +from datetime import date, datetime, time +import sqlite3 import warnings -import numpy as np -import pandas as pd -from datetime import datetime, date, time +import numpy as np +import pytest -from pandas.core.dtypes.common import (is_datetime64_dtype, - is_datetime64tz_dtype) -from pandas import DataFrame, Series, Index, MultiIndex, isna, concat -from pandas import date_range, to_datetime, to_timedelta, Timestamp import pandas.compat as compat -from pandas.compat import range, lrange, string_types, PY36 +from pandas.compat import PY36, lrange, range, string_types -import pandas.io.sql as sql -from pandas.io.sql import read_sql_table, read_sql_query +from pandas.core.dtypes.common import ( + is_datetime64_dtype, is_datetime64tz_dtype) + +import pandas as pd +from pandas import ( + DataFrame, Index, MultiIndex, Series, Timestamp, concat, date_range, isna, + to_datetime, to_timedelta) import pandas.util.testing as tm +import pandas.io.sql as sql +from pandas.io.sql import read_sql_query, read_sql_table try: import 
sqlalchemy diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 3413b8fdf18d1..ce9be6a7857bf 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -1,27 +1,31 @@ # -*- coding: utf-8 -*- # pylint: disable=E1101 +from collections import OrderedDict import datetime as dt -import io +from datetime import datetime import gzip +import io import os import struct import warnings -from collections import OrderedDict -from datetime import datetime import numpy as np import pytest -import pandas as pd -import pandas.util.testing as tm import pandas.compat as compat -from pandas.compat import iterkeys, PY3, ResourceWarning +from pandas.compat import PY3, ResourceWarning, iterkeys + from pandas.core.dtypes.common import is_categorical_dtype + +import pandas as pd from pandas.core.frame import DataFrame, Series +import pandas.util.testing as tm + from pandas.io.parsers import read_csv -from pandas.io.stata import (InvalidColumnName, PossiblePrecisionLoss, - StataMissingValue, StataReader, read_stata) +from pandas.io.stata import ( + InvalidColumnName, PossiblePrecisionLoss, StataMissingValue, StataReader, + read_stata) @pytest.fixture diff --git a/setup.cfg b/setup.cfg index 1d8b1d2a37249..89c47ed9074bb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -137,52 +137,6 @@ skip= pandas/tests/test_take.py, pandas/tests/test_nanops.py, pandas/tests/test_config.py, - pandas/tests/io/test_clipboard.py, - pandas/tests/io/test_compression.py, - pandas/tests/io/test_pytables.py, - pandas/tests/io/test_parquet.py, - pandas/tests/io/generate_legacy_storage_files.py, - pandas/tests/io/test_common.py, - pandas/tests/io/test_feather.py, - pandas/tests/io/test_s3.py, - pandas/tests/io/test_html.py, - pandas/tests/io/test_sql.py, - pandas/tests/io/test_stata.py, - pandas/tests/io/conftest.py, - pandas/tests/io/test_pickle.py, - pandas/tests/io/test_gbq.py, - pandas/tests/io/test_gcs.py, - pandas/tests/io/sas/test_sas.py, - 
pandas/tests/io/sas/test_sas7bdat.py, - pandas/tests/io/sas/test_xport.py, - pandas/tests/io/formats/test_eng_formatting.py, - pandas/tests/io/formats/test_to_excel.py, - pandas/tests/io/formats/test_to_html.py, - pandas/tests/io/formats/test_style.py, - pandas/tests/io/formats/test_format.py, - pandas/tests/io/formats/test_to_csv.py, - pandas/tests/io/formats/test_css.py, - pandas/tests/io/formats/test_to_latex.py, - pandas/tests/io/formats/test_printing.py, - pandas/tests/io/msgpack/test_buffer.py, - pandas/tests/io/msgpack/test_read_size.py, - pandas/tests/io/msgpack/test_pack.py, - pandas/tests/io/msgpack/test_except.py, - pandas/tests/io/msgpack/test_unpack_raw.py, - pandas/tests/io/msgpack/test_unpack.py, - pandas/tests/io/msgpack/test_newspec.py, - pandas/tests/io/msgpack/common.py, - pandas/tests/io/msgpack/test_limits.py, - pandas/tests/io/msgpack/test_extension.py, - pandas/tests/io/msgpack/test_sequnpack.py, - pandas/tests/io/msgpack/test_subtype.py, - pandas/tests/io/msgpack/test_seq.py, - pandas/tests/io/json/test_compression.py, - pandas/tests/io/json/test_ujson.py, - pandas/tests/io/json/test_normalize.py, - pandas/tests/io/json/test_readlines.py, - pandas/tests/io/json/test_pandas.py, - pandas/tests/io/json/test_json_table_schema.py, pandas/tests/api/test_types.py, pandas/tests/api/test_api.py, pandas/tests/tools/test_numeric.py,
- [ ] xref #23334 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/24532
2019-01-01T18:11:22Z
2019-01-02T02:54:07Z
2019-01-02T02:54:07Z
2019-01-02T20:25:41Z