title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
DataFrame.drop Raises KeyError definition | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 608e5c53ec094..a40733b7076b0 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3797,7 +3797,12 @@ def drop(self, labels=None, axis=0, index=None, columns=None,
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
- index, columns : single label or list-like
+ index : single label or list-like
+ Alternative to specifying axis (``labels, axis=0``
+ is equivalent to ``index=labels``).
+
+ .. versionadded:: 0.21.0
+ columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
@@ -3813,11 +3818,12 @@ def drop(self, labels=None, axis=0, index=None, columns=None,
Returns
-------
DataFrame
+ DataFrame without the removed index or column labels.
Raises
------
KeyError
- If none of the labels are found in the selected axis
+ If any of the labels is not found in the selected axis.
See Also
--------
@@ -3830,7 +3836,7 @@ def drop(self, labels=None, axis=0, index=None, columns=None,
Examples
--------
- >>> df = pd.DataFrame(np.arange(12).reshape(3,4),
+ >>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
@@ -3867,7 +3873,7 @@ def drop(self, labels=None, axis=0, index=None, columns=None,
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
- ... [1, 0.8], [0.3,0.2]])
+ ... [1, 0.8], [0.3, 0.2]])
>>> df
big small
lama speed 45.0 30.0
| - [X] closes #25473
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/25474 | 2019-02-28T11:41:56Z | 2019-02-28T12:41:08Z | 2019-02-28T12:41:08Z | 2019-02-28T14:11:26Z |
BUG: Keep column level name in resample nunique | diff --git a/doc/source/reference/groupby.rst b/doc/source/reference/groupby.rst
index 6ed85ff2fac43..c7f9113b53c22 100644
--- a/doc/source/reference/groupby.rst
+++ b/doc/source/reference/groupby.rst
@@ -99,6 +99,7 @@ application to columns of a specific data type.
DataFrameGroupBy.idxmax
DataFrameGroupBy.idxmin
DataFrameGroupBy.mad
+ DataFrameGroupBy.nunique
DataFrameGroupBy.pct_change
DataFrameGroupBy.plot
DataFrameGroupBy.quantile
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 170e7f14da397..ee16246a1421d 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -210,6 +210,7 @@ Groupby/Resample/Rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Bug in :meth:`pandas.core.resample.Resampler.agg` with a timezone aware index where ``OverflowError`` would raise when passing a list of functions (:issue:`22660`)
+- Bug in :meth:`pandas.core.groupby.DataFrameGroupBy.nunique` in which the names of column levels were lost (:issue:`23222`)
-
-
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 52056a6842ed9..683c21f7bd47a 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1579,6 +1579,7 @@ def groupby_series(obj, col=None):
from pandas.core.reshape.concat import concat
results = [groupby_series(obj[col], col) for col in obj.columns]
results = concat(results, axis=1)
+ results.columns.names = obj.columns.names
if not self.as_index:
results.index = ibase.default_index(len(results))
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index a884a37840f8a..1788b29a11082 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -897,6 +897,15 @@ def test_nunique_with_timegrouper():
tm.assert_series_equal(result, expected)
+def test_nunique_preserves_column_level_names():
+ # GH 23222
+ test = pd.DataFrame([1, 2, 2],
+ columns=pd.Index(['A'], name="level_0"))
+ result = test.groupby([0, 0, 0]).nunique()
+ expected = pd.DataFrame([2], columns=test.columns)
+ tm.assert_frame_equal(result, expected)
+
+
# count
# --------------------------------
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index 71b100401ec21..ce675893d9907 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -1135,6 +1135,15 @@ def test_resample_nunique():
assert_series_equal(result, expected)
+def test_resample_nunique_preserves_column_level_names():
+ # see gh-23222
+ df = tm.makeTimeDataFrame(freq="1D").abs()
+ df.columns = pd.MultiIndex.from_arrays([df.columns.tolist()] * 2,
+ names=["lev0", "lev1"])
+ result = df.resample("1h").nunique()
+ tm.assert_index_equal(df.columns, result.columns)
+
+
def test_resample_nunique_with_date_gap():
# GH 13453
index = pd.date_range('1-1-2000', '2-15-2000', freq='h')
| Closes #23222
xref #23645 | https://api.github.com/repos/pandas-dev/pandas/pulls/25469 | 2019-02-28T04:25:05Z | 2019-02-28T12:42:55Z | 2019-02-28T12:42:55Z | 2019-02-28T18:17:16Z |
ERR: Correct error message in to_datetime | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 170e7f14da397..f8b57f668c44d 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -104,7 +104,8 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
-
+- Bug in :func:`to_datetime` which would raise an (incorrect) ``ValueError`` when called with a date far into the future and the ``format`` argument specified instead of raising ``OutOfBoundsDatetime`` (:issue:`23830`)
+-
-
Categorical
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index f932e236b5218..624872c1c56c6 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -670,9 +670,11 @@ cpdef array_to_datetime(ndarray[object] values, str errors='raise',
# dateutil parser will return incorrect result because
# it will ignore nanoseconds
if is_raise:
- raise ValueError("time data {val} doesn't "
- "match format specified"
- .format(val=val))
+
+ # Still raise OutOfBoundsDatetime,
+ # as error message is informative.
+ raise
+
assert is_ignore
return values, tz_out
raise
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index b94935d2521eb..dd914d8a79837 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -1868,6 +1868,15 @@ def test_invalid_origins_tzinfo(self):
pd.to_datetime(1, unit='D',
origin=datetime(2000, 1, 1, tzinfo=pytz.utc))
+ @pytest.mark.parametrize("format", [
+ None, "%Y-%m-%d %H:%M:%S"
+ ])
+ def test_to_datetime_out_of_bounds_with_format_arg(self, format):
+ # see gh-23830
+ msg = "Out of bounds nanosecond timestamp"
+ with pytest.raises(OutOfBoundsDatetime, match=msg):
+ to_datetime("2417-10-27 00:00:00", format=format)
+
def test_processing_order(self):
# make sure we handle out-of-bounds *before*
# constructing the dates
| Closes #23830
xref #23969 | https://api.github.com/repos/pandas-dev/pandas/pulls/25467 | 2019-02-28T04:02:31Z | 2019-02-28T12:44:44Z | 2019-02-28T12:44:44Z | 2019-02-28T18:17:17Z |
DOC: Fix encoding of docstring validation for Windows | diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py
index 14172a790887d..34395435bd8c5 100644
--- a/scripts/tests/test_validate_docstrings.py
+++ b/scripts/tests/test_validate_docstrings.py
@@ -1052,6 +1052,14 @@ def test_raises_for_invalid_attribute_name(self, invalid_name):
with pytest.raises(AttributeError, match=msg):
validate_docstrings.Docstring(invalid_name)
+ @pytest.mark.parametrize('name', ['pandas.Series.str.isdecimal',
+ 'pandas.Series.str.islower'])
+ def test_encode_content_write_to_file(self, name):
+ # GH25466
+ docstr = validate_docstrings.Docstring(name).validate_pep8()
+ # the list of pep8 errors should be empty
+ assert not list(docstr)
+
class TestMainFunction:
def test_exit_status_for_validate_one(self, monkeypatch):
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index ebb09e8f311ee..63db50db45a7c 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -583,7 +583,7 @@ def validate_pep8(self):
application = flake8.main.application.Application()
application.initialize(["--quiet"])
- with tempfile.NamedTemporaryFile(mode='w') as file:
+ with tempfile.NamedTemporaryFile(mode='w', encoding='utf-8') as file:
file.write(content)
file.flush()
application.run_checks([file.name])
| In Windows, the `validate_docstrings.py` script fails because an encoding error. It has been fixed here.
PR done in the London python sprints meetup.
CC: @datapythonista | https://api.github.com/repos/pandas-dev/pandas/pulls/25466 | 2019-02-27T21:04:20Z | 2019-05-07T01:26:51Z | 2019-05-07T01:26:51Z | 2019-05-07T04:19:07Z |
BLD: modify travis to install numpy 1.6.2+fixes on py3 | diff --git a/.travis.yml b/.travis.yml
index 7568ec0763366..33c281b7f6d57 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,7 +5,7 @@ python:
- 2.7
- 3.1
- 3.2
- # - 3.3
+ - 3.3
matrix:
include:
@@ -13,20 +13,21 @@ matrix:
env: VBENCH=true
allow_failures:
- # - python: 3.3 # until travis yograde to 1.8.4
+ - python: 3.3 # until travis upgrade to 1.8.4
- python: 2.7
env: VBENCH=true
install:
- virtualenv --version
+ - date
- whoami
- pwd
- echo $VBENCH
# install 1.7.0b2 for 3.3, and pull a version of numpy git master
# with a alternate fix for detach bug as a temporary workaround
# for the others.
- - 'if [ $TRAVIS_PYTHON_VERSION == "3.3" ]; then pip uninstall numpy; pip install http://downloads.sourceforge.net/project/numpy/NumPy/1.7.0b2/numpy-1.7.0b2.tar.gz; fi'
- - 'if [ $TRAVIS_PYTHON_VERSION == "3.2" ] || [ $TRAVIS_PYTHON_VERSION == "3.1" ]; then pip install --use-mirrors git+git://github.com/numpy/numpy.git@089bfa5865cd39e2b40099755e8563d8f0d04f5f#egg=numpy; fi'
+ - 'if [ $TRAVIS_PYTHON_VERSION == "3.3" ]; then pip uninstall numpy; pip install https://github.com/numpy/numpy/archive/v1.7.0b2.tar.gz; fi'
+ - 'if [ $TRAVIS_PYTHON_VERSION == "3.2" ] || [ $TRAVIS_PYTHON_VERSION == "3.1" ]; then pip install https://github.com/y-p/numpy/archive/1.6.2_with_travis_fix.tar.gz; fi'
- 'if [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then pip install numpy; fi' # should be nop if pre-installed
- pip install --use-mirrors cython nose pytz python-dateutil;
| 1.6.2 and git master have diverged quite a bit, best to narrow down the chance
of spurious errors..
| https://api.github.com/repos/pandas-dev/pandas/pulls/2378 | 2012-11-28T23:56:22Z | 2012-11-28T23:59:48Z | 2012-11-28T23:59:48Z | 2012-11-29T16:52:08Z |
BLD: fix travis.yml, print is a function in 3.x | diff --git a/.travis.yml b/.travis.yml
index 87da143d9d612..7568ec0763366 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -35,4 +35,4 @@ script:
- 'if [ x"$VBENCH" != x"true" ]; then nosetests --exe -w /tmp -A "not slow" pandas; fi'
- pwd
- 'if [ x"$VBENCH" == x"true" ]; then pip install sqlalchemy git+git://github.com/pydata/vbench.git; python vb_suite/perf_HEAD.py; fi'
- - python -c "import numpy;print numpy.version.version"
+ - python -c "import numpy;print(numpy.version.version)"
| https://api.github.com/repos/pandas-dev/pandas/pulls/2375 | 2012-11-28T18:24:34Z | 2012-11-28T18:25:35Z | 2012-11-28T18:25:35Z | 2012-11-28T18:25:35Z | |
DOC: mention new options API in whatsnew 0.10.0 | diff --git a/doc/source/v0.10.0.txt b/doc/source/v0.10.0.txt
index 8a5652523dfda..ff4a4d53ff425 100644
--- a/doc/source/v0.10.0.txt
+++ b/doc/source/v0.10.0.txt
@@ -18,20 +18,33 @@ API changes
.. ipython:: python
def f(x):
- return Series([ x, x**2 ], index = ['x', 'x^s'])
+ return Series([ x, x**2 ], index = ['x', 'x^s'])
s = Series(np.random.rand(5))
- s
+ s
s.apply(f)
This is conceptually similar to the following.
.. ipython:: python
- concat([ f(y) for x, y in s.iteritems() ], axis=1).T
+ concat([ f(y) for x, y in s.iteritems() ], axis=1).T
+ - New API functions for working with pandas options (GH2097_):
+
+ - ``get_option`` / ``set_option`` - get/set the value of an option.
+ - ``reset_option`` / ``reset_options`` - reset an options / all options to their default value.
+ - ``describe_options`` - print a description of one or more option. When called with no arguments. print all registered options.
+ - ``set_printoptions`` is now deprecated (but functioning), the print options now live under "print_config.XYZ". For example:
+
+
+ .. ipython:: python
+
+ import pandas as pd
+ pd.get_option("print_config.max_rows")
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
on GitHub for a complete list.
.. _GH2316: https://github.com/pydata/pandas/issues/2316
+.. _GH2097: https://github.com/pydata/pandas/issues/2097
| https://api.github.com/repos/pandas-dev/pandas/pulls/2372 | 2012-11-27T23:15:05Z | 2012-11-27T23:15:15Z | 2012-11-27T23:15:15Z | 2012-11-27T23:15:15Z | |
Pytablesv4 | diff --git a/RELEASE.rst b/RELEASE.rst
index a34d5f0d4b58f..171b0e8bd2694 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -34,6 +34,7 @@ pandas 0.10.0
- Grouped histogram via `by` keyword in Series/DataFrame.hist (#2186)
- Support optional ``min_periods`` keyword in ``corr`` and ``cov``
for both Series and DataFrame (#2002)
+ - Add docs for ``HDFStore table`` format
**API Changes**
@@ -55,6 +56,11 @@ pandas 0.10.0
- Add ``normalize`` option to Series/DataFrame.asfreq (#2137)
- SparseSeries and SparseDataFrame construction from empty and scalar
values now no longer create dense ndarrays unnecessarily (#2322)
+ - Support multiple query selection formats for ``HDFStore tables`` (#1996)
+ - Support ``del store['df']`` syntax to delete HDFStores
+ - Add multi-dtype support for ``HDFStore tables``
+ - ``min_itemsize`` parameter can be specified in ``HDFStore table`` creation
+ - Indexing support in ``HDFStore tables`` (#698)
**Bug fixes**
@@ -72,6 +78,9 @@ pandas 0.10.0
- Respect dtype=object in DataFrame constructor (#2291)
- Fix DatetimeIndex.join bug with tz-aware indexes and how='outer' (#2317)
- pop(...) and del works with DataFrame with duplicate columns (#2349)
+ - Deleting of consecutive rows in ``HDFStore tables``` is much faster than before
+ - Appending on a HDFStore would fail if the table was not first created via ``put``
+
pandas 0.9.1
============
diff --git a/doc/source/io.rst b/doc/source/io.rst
index f74120ad7ef57..1108f9ca7ef83 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1,3 +1,4 @@
+
.. _io:
.. currentmodule:: pandas
@@ -793,17 +794,123 @@ Objects can be written to the file just like adding key-value pairs to a dict:
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
+ # store.put('s', s') is an equivalent method
store['s'] = s
+
store['df'] = df
+
store['wp'] = wp
+
+ # the type of stored data
+ store.handle.root.wp._v_attrs.pandas_type
+
store
In a current or later Python session, you can retrieve stored objects:
.. ipython:: python
+ # store.get('df') is an equivalent method
store['df']
+Deletion of the object specified by the key
+
+.. ipython:: python
+
+ # store.remove('wp') is an equivalent method
+ del store['wp']
+
+ store
+
+.. ipython:: python
+ :suppress:
+
+ store.close()
+ import os
+ os.remove('store.h5')
+
+
+These stores are **not** appendable once written (though you can simply remove them and rewrite). Nor are they **queryable**; they must be retrieved in their entirety.
+
+
+Storing in Table format
+~~~~~~~~~~~~~~~~~~~~~~~
+
+``HDFStore`` supports another ``PyTables`` format on disk, the ``table`` format. Conceptually a ``table`` is shaped
+very much like a DataFrame, with rows and columns. A ``table`` may be appended to in the same or other sessions.
+In addition, delete & query type operations are supported. You can create an index with ``create_table_index``
+after data is already in the table (this may become automatic in the future or an option on appending/putting a ``table``).
+
+.. ipython:: python
+ :suppress:
+ :okexcept:
+
+ os.remove('store.h5')
+
+.. ipython:: python
+
+ store = HDFStore('store.h5')
+ df1 = df[0:4]
+ df2 = df[4:]
+ store.append('df', df1)
+ store.append('df', df2)
+ store.append('wp', wp)
+ store
+
+ store.select('df')
+
+ # the type of stored data
+ store.handle.root.df._v_attrs.pandas_type
+
+ store.create_table_index('df')
+ store.handle.root.df.table
+
+.. ipython:: python
+ :suppress:
+
+ store.close()
+ import os
+ os.remove('store.h5')
+
+
+Querying objects stored in Table format
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``select`` and ``delete`` operations have an optional criteria that can be specified to select/delete only
+a subset of the data. This allows one to have a very large on-disk table and retrieve only a portion of the data.
+
+A query is specified using the ``Term`` class under the hood.
+
+ - 'index' and 'column' are supported indexers of a DataFrame
+ - 'major_axis' and 'minor_axis' are supported indexers of the Panel
+
+Valid terms can be created from ``dict, list, tuple, or string``. Objects can be embeded as values. Allowed operations are: ``<, <=, >, >=, =``. ``=`` will be inferred as an implicit set operation (e.g. if 2 or more values are provided). The following are all valid terms.
+
+ - ``dict(field = 'index', op = '>', value = '20121114')``
+ - ``('index', '>', '20121114')``
+ - ``'index>20121114'``
+ - ``('index', '>', datetime(2012,11,14))``
+ - ``('index', ['20121114','20121115'])``
+ - ``('major', '=', Timestamp('2012/11/14'))``
+ - ``('minor_axis', ['A','B'])``
+
+Queries are built up using a list of ``Terms`` (currently only **anding** of terms is supported). An example query for a panel might be specified as follows.
+``['major_axis>20000102', ('minor_axis', '=', ['A','B']) ]``. This is roughly translated to: `major_axis must be greater than the date 20000102 and the minor_axis must be A or B`
+
+.. ipython:: python
+
+ store = HDFStore('store.h5')
+ store.append('wp',wp)
+ store.select('wp',[ 'major_axis>20000102', ('minor_axis', '=', ['A','B']) ])
+
+Delete from objects stored in Table format
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. ipython:: python
+
+ store.remove('wp', 'index>20000102' )
+ store.select('wp')
+
.. ipython:: python
:suppress:
@@ -811,9 +918,27 @@ In a current or later Python session, you can retrieve stored objects:
import os
os.remove('store.h5')
+Notes & Caveats
+~~~~~~~~~~~~~~~
+
+ - Selection by items (the top level panel dimension) is not possible; you always get all of the items in the returned Panel
+ - ``PyTables`` only supports fixed-width string columns in ``tables``. The sizes of a string based indexing column (e.g. *index* or *minor_axis*) are determined as the maximum size of the elements in that axis or by passing the ``min_itemsize`` on the first table creation. If subsequent appends introduce elements in the indexing axis that are larger than the supported indexer, an Exception will be raised (otherwise you could have a silent truncation of these indexers, leading to loss of information).
+ - Once a ``table`` is created its items (Panel) / columns (DataFrame) are fixed; only exactly the same columns can be appended
+ - You can not append/select/delete to a non-table (table creation is determined on the first append, or by passing ``table=True`` in a put operation)
+
+Performance
+~~~~~~~~~~~
+
+ - ``Tables`` come with a performance penalty as compared to regular stores. The benefit is the ability to append/delete and query (potentially very large amounts of data).
+ Write times are generally longer as compared with regular stores. Query times can be quite fast, especially on an indexed axis.
+ - ``Tables`` can (as of 0.10.0) be expressed as different types.
-.. Storing in Table format
-.. ~~~~~~~~~~~~~~~~~~~~~~~
+ - ``AppendableTable`` which is a similiar table to past versions (this is the default).
+ - ``WORMTable`` (pending implementation) - is available to faciliate very fast writing of tables that are also queryable (but CANNOT support appends)
-.. Querying objects stored in Table format
-.. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ - To delete a lot of data, it is sometimes better to erase the table and rewrite it. ``PyTables`` tends to increase the file size with deletions
+ - In general it is best to store Panels with the most frequently selected dimension in the minor axis and a time/date like dimension in the major axis, but this is not required. Panels can have any major_axis and minor_axis type that is a valid Panel indexer.
+ - No dimensions are currently indexed automagically (in the ``PyTables`` sense); these require an explict call to ``create_table_index``
+ - ``Tables`` offer better performance when compressed after writing them (as opposed to turning on compression at the very beginning)
+ use the pytables utilities ``ptrepack`` to rewrite the file (and also can change compression methods)
+ - Duplicate rows can be written, but are filtered out in selection (with the last items being selected; thus a table is unique on major, minor pairs)
diff --git a/doc/source/v0.10.0.txt b/doc/source/v0.10.0.txt
index ff4a4d53ff425..401b2c661460f 100644
--- a/doc/source/v0.10.0.txt
+++ b/doc/source/v0.10.0.txt
@@ -9,6 +9,95 @@ enhancements along with a large number of bug fixes.
New features
~~~~~~~~~~~~
+Updated PyTables Support
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Docs for PyTables ``Table`` format & several enhancements to the api. Here is a taste of what to expect.
+
+`the full docs for tables
+<https://github.com/pydata/pandas/blob/master/io.html#hdf5-pytables>`__
+
+
+ .. ipython:: python
+ :suppress:
+ :okexcept:
+
+ os.remove('store.h5')
+
+ .. ipython:: python
+
+ store = HDFStore('store.h5')
+ df = DataFrame(randn(8, 3), index=date_range('1/1/2000', periods=8),
+ columns=['A', 'B', 'C'])
+ df
+
+ # appending data frames
+ df1 = df[0:4]
+ df2 = df[4:]
+ store.append('df', df1)
+ store.append('df', df2)
+ store
+
+ # selecting the entire store
+ store.select('df')
+
+ .. ipython:: python
+
+ from pandas.io.pytables import Term
+ wp = Panel(randn(2, 5, 4), items=['Item1', 'Item2'],
+ major_axis=date_range('1/1/2000', periods=5),
+ minor_axis=['A', 'B', 'C', 'D'])
+ wp
+
+ # storing a panel
+ store.append('wp',wp)
+
+ # selecting via A QUERY
+ store.select('wp',
+ [ Term('major_axis>20000102'), Term('minor_axis', '=', ['A','B']) ])
+
+ # removing data from tables
+ store.remove('wp', [ 'major_axis', '>', wp.major_axis[3] ])
+ store.select('wp')
+
+ # deleting a store
+ del store['df']
+ store
+
+ **Enhancements**
+
+ - added multi-dtype support!
+
+ .. ipython:: python
+
+ df['string'] = 'string'
+ df['int'] = 1
+
+ store.append('df',df)
+ df1 = store.select('df')
+ df1
+ df1.get_dtype_counts()
+
+ - performance improvments on table writing
+ - support for arbitrarily indexed dimensions
+
+ **Bug Fixes**
+
+ - added ``Term`` method of specifying where conditions, closes GH #1996
+ - ``del store['df']`` now call ``store.remove('df')`` for store deletion
+ - deleting of consecutive rows is much faster than before
+ - ``min_itemsize`` parameter can be specified in table creation to force a minimum size for indexing columns
+ (the previous implementation would set the column size based on the first append)
+ - indexing support via ``create_table_index`` (requires PyTables >= 2.3), close GH #698
+ - appending on a store would fail if the table was not first created via ``put``
+ - minor change to select and remove: require a table ONLY if where is also provided (and not None)
+
+ .. ipython:: python
+ :suppress:
+
+ store.close()
+ import os
+ os.remove('store.h5')
API changes
~~~~~~~~~~~
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 41120376c5e90..1261ebbc93618 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -7,6 +7,9 @@
from datetime import datetime, date
import time
+import re
+import copy
+import itertools
import numpy as np
from pandas import (
@@ -19,14 +22,17 @@
from pandas.core.algorithms import match, unique
from pandas.core.categorical import Factor
-from pandas.core.common import _asarray_tuplesafe
-from pandas.core.internals import BlockManager, make_block
+from pandas.core.common import _asarray_tuplesafe, _try_sort
+from pandas.core.internals import BlockManager, make_block, form_blocks
from pandas.core.reshape import block2d_to_block3d
import pandas.core.common as com
+from pandas.tools.merge import concat
import pandas.lib as lib
from contextlib import contextmanager
+import pandas._pytables as pylib
+
# reading and writing the full object in one go
_TYPE_MAP = {
Series: 'series',
@@ -67,12 +73,20 @@
# oh the troubles to reduce import time
_table_mod = None
+_table_supports_index = False
def _tables():
global _table_mod
+ global _table_supports_index
if _table_mod is None:
import tables
_table_mod = tables
+
+ # version requirements
+ major, minor, subv = tables.__version__.split('.')
+ if int(major) >= 2 and int(minor) >= 3:
+ _table_supports_index = True
+
return _table_mod
@@ -188,6 +202,9 @@ def __getitem__(self, key):
def __setitem__(self, key, value):
self.put(key, value)
+ def __delitem__(self, key):
+ return self.remove(key)
+
def __contains__(self, key):
return hasattr(self.handle.root, key)
@@ -201,10 +218,16 @@ def __repr__(self):
keys = []
values = []
for k, v in sorted(self.handle.root._v_children.iteritems()):
- kind = v._v_attrs.pandas_type
+ kind = getattr(v._v_attrs,'pandas_type',None)
keys.append(str(k))
- values.append(_NAME_MAP[kind])
+
+ if kind is None:
+ values.append('unknown type')
+ elif _is_table_type(v):
+ values.append(str(create_table(self, v)))
+ else:
+ values.append(_NAME_MAP[kind])
output += adjoin(5, keys, values)
else:
@@ -295,33 +318,17 @@ def select(self, key, where=None):
Parameters
----------
key : object
- where : list, optional
-
- Must be a list of dict objects of the following forms. Selection can
- be performed on the 'index' or 'column' fields.
-
- Comparison op
- {'field' : 'index',
- 'op' : '>=',
- 'value' : value}
-
- Match single value
- {'field' : 'index',
- 'value' : v1}
-
- Match a set of values
- {'field' : 'index',
- 'value' : [v1, v2, v3]}
+ where : list of Term (or convertable) objects, optional
"""
group = getattr(self.handle.root, key, None)
- if 'table' not in group._v_attrs.pandas_type:
- raise Exception('can only select on objects written as tables')
+ if where is not None and not _is_table_type(group):
+ raise Exception('can only select with where on objects written as tables')
if group is not None:
return self._read_group(group, where)
def put(self, key, value, table=False, append=False,
- compression=None):
+ compression=None, **kwargs):
"""
Store object in HDFStore
@@ -342,7 +349,7 @@ def put(self, key, value, table=False, append=False,
be used.
"""
self._write_to_group(key, value, table=table, append=append,
- comp=compression)
+ comp=compression, **kwargs)
def _get_handler(self, op, kind):
return getattr(self, '_%s_%s' % (op, kind))
@@ -359,18 +366,23 @@ def remove(self, key, where=None):
For Table node, delete specified rows. See HDFStore.select for more
information
- Parameters
- ----------
- key : object
+ Returns
+ -------
+ number of rows removed (or None if not a Table)
+
"""
if where is None:
self.handle.removeNode(self.handle.root, key, recursive=True)
else:
group = getattr(self.handle.root, key, None)
if group is not None:
- self._delete_from_table(group, where)
+ if not _is_table_type(group):
+ raise Exception('can only remove with where on objects written as tables')
+ t = create_table(self, group)
+ return t.delete(where)
+ return None
- def append(self, key, value):
+ def append(self, key, value, **kwargs):
"""
Append to Table in file. Node must already exist and be Table
format.
@@ -385,10 +397,33 @@ def append(self, key, value):
Does *not* check if data being appended overlaps with existing
data in the table, so be careful
"""
- self._write_to_group(key, value, table=True, append=True)
+ self._write_to_group(key, value, table=True, append=True, **kwargs)
+
+ def create_table_index(self, key, **kwargs):
+ """ Create a pytables index on the table
+ Paramaters
+ ----------
+ key : object (the node to index)
+
+ Exceptions
+ ----------
+ raises if the node is not a table
+
+ """
+
+ # version requirements
+ if not _table_supports_index:
+ raise("PyTables >= 2.3 is required for table indexing")
+
+ group = getattr(self.handle.root, key, None)
+ if group is None: return
+
+ if not _is_table_type(group):
+ raise Exception("cannot create table index on a non-table")
+ create_table(self, group).create_index(**kwargs)
def _write_to_group(self, key, value, table=False, append=False,
- comp=None):
+ comp=None, **kwargs):
root = self.handle.root
if key not in root._v_children:
group = self.handle.createGroup(root, key)
@@ -400,7 +435,7 @@ def _write_to_group(self, key, value, table=False, append=False,
kind = '%s_table' % kind
handler = self._get_handler(op='write', kind=kind)
wrapper = lambda value: handler(group, value, append=append,
- comp=comp)
+ comp=comp, **kwargs)
else:
if append:
raise ValueError('Can only append to Tables')
@@ -531,18 +566,10 @@ def _read_block_manager(self, group):
return BlockManager(blocks, axes)
- def _write_frame_table(self, group, df, append=False, comp=None):
- mat = df.values
- values = mat.reshape((1,) + mat.shape)
-
- if df._is_mixed_type:
- raise Exception('Cannot currently store mixed-type DataFrame '
- 'objects in Table format')
-
- self._write_table(group, items=['value'],
- index=df.index, columns=df.columns,
- values=values, append=append, compression=comp)
-
+ def _write_frame_table(self, group, df, append=False, comp=None, **kwargs):
+ t = create_table(self, group, typ = 'appendable_frame')
+ t.write(axes_to_index=[0], obj=df, append=append, compression=comp, **kwargs)
+
def _write_wide(self, group, panel):
panel._consolidate_inplace()
self._write_block_manager(group, panel._data)
@@ -550,13 +577,14 @@ def _write_wide(self, group, panel):
def _read_wide(self, group, where=None):
return Panel(self._read_block_manager(group))
- def _write_wide_table(self, group, panel, append=False, comp=None):
- self._write_table(group, items=panel.items, index=panel.major_axis,
- columns=panel.minor_axis, values=panel.values,
- append=append, compression=comp)
-
+ def _write_wide_table(self, group, panel, append=False, comp=None, **kwargs):
+ t = create_table(self, group, typ = 'appendable_panel')
+ t.write(axes_to_index=[1,2], obj=panel,
+ append=append, compression=comp, **kwargs)
+
def _read_wide_table(self, group, where=None):
- return self._read_panel_table(group, where)
+ t = create_table(self, group)
+ return t.read(where)
def _write_index(self, group, key, index):
if isinstance(index, MultiIndex):
@@ -570,10 +598,10 @@ def _write_index(self, group, key, index):
self._write_sparse_intindex(group, key, index)
else:
setattr(group._v_attrs, '%s_variety' % key, 'regular')
- converted, kind, _ = _convert_index(index)
- self._write_array(group, key, converted)
+ converted = _convert_index(index).set_name('index')
+ self._write_array(group, key, converted.values)
node = getattr(group, key)
- node._v_attrs.kind = kind
+ node._v_attrs.kind = converted.kind
node._v_attrs.name = index.name
if isinstance(index, (DatetimeIndex, PeriodIndex)):
@@ -630,11 +658,11 @@ def _write_multi_index(self, group, key, index):
index.labels,
index.names)):
# write the level
- conv_level, kind, _ = _convert_index(lev)
level_key = '%s_level%d' % (key, i)
- self._write_array(group, level_key, conv_level)
+ conv_level = _convert_index(lev).set_name(level_key)
+ self._write_array(group, level_key, conv_level.values)
node = getattr(group, level_key)
- node._v_attrs.kind = kind
+ node._v_attrs.kind = conv_level.kind
node._v_attrs.name = name
# write the name
@@ -739,89 +767,6 @@ def _write_array(self, group, key, value):
getattr(group, key)._v_attrs.transposed = transposed
- def _write_table(self, group, items=None, index=None, columns=None,
- values=None, append=False, compression=None):
- """ need to check for conform to the existing table:
- e.g. columns should match """
- # create dict of types
- index_converted, index_kind, index_t = _convert_index(index)
- columns_converted, cols_kind, col_t = _convert_index(columns)
-
- # create the table if it doesn't exist (or get it if it does)
- if not append:
- if 'table' in group:
- self.handle.removeNode(group, 'table')
-
- if 'table' not in group:
- # create the table
- desc = {'index': index_t,
- 'column': col_t,
- 'values': _tables().FloatCol(shape=(len(values)))}
-
- options = {'name': 'table',
- 'description': desc}
-
- if compression:
- complevel = self.complevel
- if complevel is None:
- complevel = 9
- filters = _tables().Filters(complevel=complevel,
- complib=compression,
- fletcher32=self.fletcher32)
- options['filters'] = filters
- elif self.filters is not None:
- options['filters'] = self.filters
-
- table = self.handle.createTable(group, **options)
- else:
- # the table must already exist
- table = getattr(group, 'table', None)
-
- # check for backwards incompatibility
- if append:
- existing_kind = table._v_attrs.index_kind
- if existing_kind != index_kind:
- raise TypeError("incompatible kind in index [%s - %s]" %
- (existing_kind, index_kind))
-
- # add kinds
- table._v_attrs.index_kind = index_kind
- table._v_attrs.columns_kind = cols_kind
- if append:
- existing_fields = getattr(table._v_attrs, 'fields', None)
- if (existing_fields is not None and
- existing_fields != list(items)):
- raise Exception("appended items do not match existing items"
- " in table!")
- # this depends on creation order of the table
- table._v_attrs.fields = list(items)
-
- # add the rows
- try:
- for i, index in enumerate(index_converted):
- for c, col in enumerate(columns_converted):
- v = values[:, i, c]
-
- # don't store the row if all values are np.nan
- if np.isnan(v).all():
- continue
-
- row = table.row
- row['index'] = index
- row['column'] = col
-
- # create the values array
- row['values'] = v
- row.append()
- self.handle.flush()
- except (ValueError), detail: # pragma: no cover
- print "value_error in _write_table -> %s" % str(detail)
- try:
- self.handle.flush()
- except Exception:
- pass
- raise
-
def _read_group(self, group, where=None):
kind = group._v_attrs.pandas_type
kind = _LEGACY_MAP.get(kind, kind)
@@ -853,100 +798,786 @@ def _read_index_legacy(self, group, key):
node = getattr(group, key)
data = node[:]
kind = node._v_attrs.kind
-
return _unconvert_index_legacy(data, kind)
def _read_frame_table(self, group, where=None):
- return self._read_panel_table(group, where)['value']
+ t = create_table(self, group)
+ return t.read(where)
+
+
+class Col(object):
+ """ a column description class
+
+ Parameters
+ ----------
+
+ values : the ndarray like converted values
+ kind : a string description of this type
+ typ : the pytables type
+
+ """
+ is_indexable = True
+
+ def __init__(self, values = None, kind = None, typ = None, cname = None, itemsize = None, name = None, kind_attr = None, **kwargs):
+ self.values = values
+ self.kind = kind
+ self.typ = typ
+ self.itemsize = itemsize
+ self.name = None
+ self.cname = cname
+ self.kind_attr = None
+ self.table = None
+
+ if name is not None:
+ self.set_name(name, kind_attr)
+
+ def set_name(self, name, kind_attr = None):
+ self.name = name
+ self.kind_attr = kind_attr or "%s_kind" % name
+ if self.cname is None:
+ self.cname = name
+
+ return self
+
+ def set_table(self, table):
+ self.table = table
+ return self
+
+ def __repr__(self):
+ return "name->%s,cname->%s,kind->%s" % (self.name,self.cname,self.kind)
+
+ __str__ = __repr__
+
+ def copy(self):
+ new_self = copy.copy(self)
+ return new_self
+
+ def infer(self, table):
+ """ infer this column from the table: create and return a new object """
+ new_self = self.copy()
+ new_self.set_table(table)
+ new_self.get_attr()
+ return new_self
+
+ def convert(self, sel):
+ """ set the values from this selection """
+ self.values = _maybe_convert(sel.values[self.cname], self.kind)
+
+ @property
+ def attrs(self):
+ return self.table._v_attrs
+
+ @property
+ def description(self):
+ return self.table.description
+
+ @property
+ def pos(self):
+ """ my column position """
+ return getattr(self.col,'_v_pos',None)
+
+ @property
+ def col(self):
+ """ return my current col description """
+ return getattr(self.description,self.cname,None)
+
+ @property
+ def cvalues(self):
+ """ return my cython values """
+ return self.values
+
+ def __iter__(self):
+ return iter(self.values)
+
+ def maybe_set_size(self, min_itemsize = None, **kwargs):
+ """ maybe set a string col itemsize """
+ if self.kind == 'string' and min_itemsize is not None:
+ if self.typ.itemsize < min_itemsize:
+ self.typ = _tables().StringCol(itemsize = min_itemsize, pos = getattr(self.typ,'pos',None))
+
+ def validate_and_set(self, table, append, **kwargs):
+ self.set_table(table)
+ self.validate_col()
+ self.validate_attr(append)
+ self.set_attr()
+
+ def validate_col(self):
+ """ validate this column & set table data for it """
+
+ # validate this column for string truncation (or reset to the max size)
+ if self.kind == 'string':
+
+ c = self.col
+ if c is not None:
+ if c.itemsize < self.itemsize:
+ raise Exception("[%s] column has a min_itemsize of [%s] but itemsize [%s] is required!" % (self.cname,self.itemsize,c.itemsize))
+
+
+ def validate_attr(self, append):
+ # check for backwards incompatibility
+ if append:
+ existing_kind = getattr(self.attrs,self.kind_attr,None)
+ if existing_kind is not None and existing_kind != self.kind:
+ raise TypeError("incompatible kind in col [%s - %s]" %
+ (existing_kind, self.kind))
+
+ def get_attr(self):
+ """ set the kind for this colummn """
+ self.kind = getattr(self.attrs,self.kind_attr,None)
+
+ def set_attr(self):
+ """ set the kind for this colummn """
+ setattr(self.attrs,self.kind_attr,self.kind)
+
+class DataCol(Col):
+ """ a data holding column, by definition this is not indexable
+
+ Parameters
+ ----------
+
+ data : the actual data
+ cname : the column name in the table to hold the data (typeically values)
+ """
+ is_indexable = False
+
+ @classmethod
+ def create_for_block(cls, i, **kwargs):
+ """ return a new datacol with the block i """
+ return cls(name = 'values_%d' % i, cname = 'values_block_%d' % i, **kwargs)
+
+ def __init__(self, values = None, kind = None, typ = None, cname = None, data = None, **kwargs):
+ super(DataCol, self).__init__(values = values, kind = kind, typ = typ, cname = cname, **kwargs)
+ self.dtype = None
+ self.dtype_attr = "%s_dtype" % self.name
+ self.set_data(data)
+
+ def __repr__(self):
+ return "name->%s,cname->%s,dtype->%s,shape->%s" % (self.name,self.cname,self.dtype,self.shape)
+
+ def set_data(self, data):
+ self.data = data
+ if data is not None:
+ if self.dtype is None:
+ self.dtype = data.dtype.name
+
+ @property
+ def shape(self):
+ return getattr(self.data,'shape',None)
+
+ @property
+ def cvalues(self):
+ """ return my cython values """
+ return self.data
+
+ def validate_attr(self, append):
+ """ validate that we have the same order as the existing & same dtype """
+ if append:
+ existing_fields = getattr(self.attrs, self.kind_attr, None)
+ if (existing_fields is not None and
+ existing_fields != list(self.values)):
+ raise Exception("appended items do not match existing items"
+ " in table!")
+
+ existing_dtype = getattr(self.attrs, self.dtype_attr, None)
+ if (existing_dtype is not None and
+ existing_dtype != self.dtype):
+ raise Exception("appended items dtype do not match existing items dtype"
+ " in table!")
+
+ def convert(self, sel):
+ """ set the data from this selection (and convert to the correct dtype if we can) """
+ self.set_data(sel.values[self.cname])
+
+ # convert to the correct dtype
+ if self.dtype is not None:
+ try:
+ self.data = self.data.astype(self.dtype)
+ except:
+ self.data = self.data.astype('O')
+
+ def get_attr(self):
+ """ get the data for this colummn """
+ self.values = getattr(self.attrs,self.kind_attr,None)
+ self.dtype = getattr(self.attrs,self.dtype_attr,None)
+
+ def set_attr(self):
+ """ set the data for this colummn """
+ setattr(self.attrs,self.kind_attr,self.values)
+ if self.dtype is not None:
+ setattr(self.attrs,self.dtype_attr,self.dtype)
+
+class Table(object):
+ """ represent a table:
+ facilitate read/write of various types of tables
+ this is an abstract base class
+
+ Parameters
+ ----------
+
+ parent : my parent HDFStore
+ group : the group node where the table resides
+
+ """
+ table_type = None
+ ndim = None
+ axis_names = ['index','column']
+
+ def __init__(self, parent, group):
+ self.parent = parent
+ self.group = group
+ self.index_axes = []
+ self.non_index_axes = []
+ self.values_axes = []
+ self.selection = None
+
+ @property
+ def pandas_type(self):
+ return getattr(self.group._v_attrs,'pandas_type',None)
+
+ def __repr__(self):
+ """ return a pretty representatgion of myself """
+ return "%s (typ->%s,nrows->%s)" % (self.pandas_type,self.table_type,self.nrows)
+
+ __str__ = __repr__
+
+ @property
+ def nrows(self):
+ return getattr(self.table,'nrows',None)
+
+ @property
+ def table(self):
+ """ return the table group """
+ return getattr(self.group, 'table', None)
+
+ @property
+ def handle(self):
+ return self.parent.handle
+
+ @property
+ def _quiet(self):
+ return self.parent._quiet
+
+ @property
+ def filters(self):
+ return self.parent.filters
+
+ @property
+ def complevel(self):
+ return self.parent.complevel
+
+ @property
+ def fletcher32(self):
+ return self.parent.fletcher32
+
+ @property
+ def complib(self):
+ return self.parent.complib
+
+ @property
+ def attrs(self):
+ return self.group._v_attrs
+
+ @property
+ def description(self):
+ return self.table.description
+
+ @property
+ def is_transpose(self):
+ """ does my data need transposition """
+ return False
+
+ @property
+ def axes(self):
+ return itertools.chain(self.index_axes, self.values_axes)
+
+ def kinds_map(self):
+ """ return a diction of columns -> kinds """
+ return dict([ (a.cname,a.kind) for a in self.axes ])
+
+ def index_cols(self):
+ """ return a list of my index cols """
+ return [ i.cname for i in self.index_axes ]
+
+ def values_cols(self):
+ """ return a list of my values cols """
+ return [ i.cname for i in self.values_axes ]
+
+ def set_attrs(self):
+ """ set our table type & indexables """
+ self.attrs.table_type = self.table_type
+ self.attrs.index_cols = self.index_cols()
+ self.attrs.values_cols = self.values_cols()
+ self.attrs.non_index_axes = self.non_index_axes
+
+ def validate(self):
+ """ raise if we have an incompitable table type with the current """
+ et = getattr(self.attrs,'table_type',None)
+ if et is not None and et != self.table_type:
+ raise TypeError("incompatible table_type with existing [%s - %s]" %
+ (et, self.table_type))
+ ic = getattr(self.attrs,'index_cols',None)
+ if ic is not None and ic != self.index_cols():
+ raise TypeError("incompatible index cols with existing [%s - %s]" %
+ (ic, self.index_cols()))
+
+ @property
+ def indexables(self):
+ """ create/cache the indexables if they don't exist """
+ if self._indexables is None:
+
+ d = self.description
+ self._indexables = []
+
+ # index columns
+ self._indexables.extend([ Col(name = i) for i in self.attrs.index_cols ])
+
+ # data columns
+ self._indexables.extend([ DataCol.create_for_block(i = i) for i, c in enumerate(self.attrs.values_cols) ])
+
+ return self._indexables
+
+ def create_index(self, columns = None, optlevel = None, kind = None):
+ """
+ Create a pytables index on the specified columns
+ note: cannot index Time64Col() currently; PyTables must be >= 2.3.1
+
+
+ Paramaters
+ ----------
+ columns : None or list_like (the columns to index - currently supports index/column)
+ optlevel: optimization level (defaults to 6)
+ kind : kind of index (defaults to 'medium')
+
+ Exceptions
+ ----------
+ raises if the node is not a table
+
+ """
+
+ table = self.table
+ if table is None: return
+
+ if columns is None:
+ columns = ['index']
+ if not isinstance(columns, (tuple,list)):
+ columns = [ columns ]
+
+ kw = dict()
+ if optlevel is not None:
+ kw['optlevel'] = optlevel
+ if kind is not None:
+ kw['kind'] = kind
+
+ for c in columns:
+ v = getattr(table.cols,c,None)
+ if v is not None and not v.is_indexed:
+ v.createIndex(**kw)
+
+ def read_axes(self, where):
+ """ create and return the axes sniffed from the table """
- def _read_panel_table(self, group, where=None):
- table = getattr(group, 'table')
- fields = table._v_attrs.fields
+ # infer the data kind
+ self.infer_axes()
# create the selection
- sel = Selection(table, where, table._v_attrs.index_kind)
- sel.select()
- fields = table._v_attrs.fields
+ self.selection = Selection(self, where)
+ self.selection.select()
- columns = _maybe_convert(sel.values['column'],
- table._v_attrs.columns_kind)
- index = _maybe_convert(sel.values['index'], table._v_attrs.index_kind)
- values = sel.values['values']
+ # convert the data
+ for a in self.axes:
+ a.convert(self.selection)
- major = Factor.from_array(index)
- minor = Factor.from_array(columns)
+ def infer_axes(self):
+ """ infer the axes from the indexables """
+ self.index_axes, self.values_axes = [ a.infer(self.table) for a in self.indexables if a.is_indexable ], [ a.infer(self.table) for a in self.indexables if not a.is_indexable ]
+ self.non_index_axes = getattr(self.attrs,'non_index_axes',None) or []
+
+ def create_axes(self, axes_to_index, obj, validate = True, min_itemsize = None):
+ """ create and return the axes
+ leagcy tables create an indexable column, indexable index, non-indexable fields
+
+ """
+
+ self.index_axes = []
+ self.non_index_axes = []
+
+ # create axes to index and non_index
+ j = 0
+ for i, a in enumerate(obj.axes):
+ if i in axes_to_index:
+ self.index_axes.append(_convert_index(a).set_name(self.axis_names[j]))
+ j += 1
+ else:
+ self.non_index_axes.append((i,list(a)))
+
+ # check for column conflicts
+ if validate:
+ for a in self.axes:
+ a.maybe_set_size(min_itemsize = min_itemsize)
+
+ # add my values
+ self.values_axes = []
+ for i, b in enumerate(obj._data.blocks):
+ values = b.values
+
+ # a string column
+ if b.dtype.name == 'object':
+ atom = _tables().StringCol(itemsize = values.dtype.itemsize, shape = b.shape[0])
+ utype = 'S8'
+ else:
+ atom = getattr(_tables(),"%sCol" % b.dtype.name.capitalize())(shape = b.shape[0])
+ utype = atom._deftype
+
+ # coerce data to this type
+ try:
+ values = values.astype(utype)
+ except (Exception), detail:
+ raise Exception("cannot coerce data type -> [dtype->%s]" % b.dtype.name)
+
+ dc = DataCol.create_for_block(i = i, values = list(b.items), kind = b.dtype.name, typ = atom, data = values)
+ self.values_axes.append(dc)
+
+ def create_description(self, compression = None, complevel = None):
+ """ create the description of the table from the axes & values """
+
+ d = { 'name' : 'table' }
+
+ # description from the axes & values
+ d['description'] = dict([ (a.cname,a.typ) for a in self.axes ])
+
+ if compression:
+ complevel = self.complevel
+ if complevel is None:
+ complevel = 9
+ filters = _tables().Filters(complevel=complevel,
+ complib=compression,
+ fletcher32=self.fletcher32)
+ d['filters'] = filters
+ elif self.filters is not None:
+ d['filters'] = self.filters
+ return d
+
+ def read(self, **kwargs):
+ raise NotImplementedError("cannot read on an abstract table: subclasses should implement")
+
+ def write(self, **kwargs):
+ raise NotImplementedError("cannot write on an abstract table")
+
+ def delete(self, where = None, **kwargs):
+ """ support fully deleting the node in its entirety (only) - where specification must be None """
+ if where is None:
+ self.handle.removeNode(self.group, recursive=True)
+ return None
+
+ raise NotImplementedError("cannot delete on an abstract table")
+
+class WORMTable(Table):
+ """ a write-once read-many table:
+ this format DOES NOT ALLOW appending to a table. writing is a one-time operation
+ the data are stored in a format that allows for searching the data on disk
+ """
+ table_type = 'worm'
+
+ def read(self, **kwargs):
+ """ read the indicies and the indexing array, calculate offset rows and return """
+ raise NotImplementedError("WORMTable needs to implement read")
+
+ def write(self, **kwargs):
+ """ write in a format that we can search later on (but cannot append to):
+ write out the indicies and the values using _write_array (e.g. a CArray)
+ create an indexing table so that we can search """
+ raise NotImplementedError("WORKTable needs to implement write")
+
+class LegacyTable(Table):
+ """ an appendable table:
+ allow append/query/delete operations to a (possibily) already existing appendable table
+ this table ALLOWS append (but doesn't require them), and stores the data in a format
+ that can be easily searched
+
+ """
+ _indexables = [Col(name = 'index'),Col(name = 'column', index_kind = 'columns_kind'), DataCol(name = 'fields', cname = 'values', kind_attr = 'fields') ]
+ table_type = 'legacy'
+
+ def read(self, where=None):
+ """ we have 2 indexable columns, with an arbitrary number of data axes """
+
+ self.read_axes(where)
+
+ index = self.index_axes[0].values
+ column = self.index_axes[1].values
+
+ major = Factor.from_array(index)
+ minor = Factor.from_array(column)
+
J, K = len(major.levels), len(minor.levels)
key = major.labels * K + minor.labels
+ panels = []
if len(unique(key)) == len(key):
sorter, _ = lib.groupsort_indexer(com._ensure_int64(key), J * K)
sorter = com._ensure_platform_int(sorter)
- # the data need to be sorted
- sorted_values = values.take(sorter, axis=0)
- major_labels = major.labels.take(sorter)
- minor_labels = minor.labels.take(sorter)
+ # create the panels
+ for c in self.values_axes:
+
+ # the data need to be sorted
+ sorted_values = c.data.take(sorter, axis=0)
+ major_labels = major.labels.take(sorter)
+ minor_labels = minor.labels.take(sorter)
+ items = Index(c.values)
- block = block2d_to_block3d(sorted_values, fields, (J, K),
- major_labels, minor_labels)
+ block = block2d_to_block3d(sorted_values, items, (J, K),
+ major_labels, minor_labels)
+
+ mgr = BlockManager([block], [items, major.levels, minor.levels])
+ panels.append(Panel(mgr))
- mgr = BlockManager([block], [block.ref_items,
- major.levels, minor.levels])
- wp = Panel(mgr)
else:
if not self._quiet: # pragma: no cover
print ('Duplicate entries in table, taking most recently '
'appended')
# reconstruct
- long_index = MultiIndex.from_arrays([index, columns])
- lp = DataFrame(values, index=long_index, columns=fields)
+ long_index = MultiIndex.from_arrays([index, column])
+
+ panels = []
+ for c in self.values_axes:
+ lp = DataFrame(c.data, index=long_index, columns=c.values)
+
+ # need a better algorithm
+ tuple_index = long_index._tuple_index
- # need a better algorithm
- tuple_index = long_index._tuple_index
+ unique_tuples = lib.fast_unique(tuple_index)
+ unique_tuples = _asarray_tuplesafe(unique_tuples)
- unique_tuples = lib.fast_unique(tuple_index)
- unique_tuples = _asarray_tuplesafe(unique_tuples)
+ indexer = match(unique_tuples, tuple_index)
+ indexer = com._ensure_platform_int(indexer)
- indexer = match(unique_tuples, tuple_index)
- indexer = com._ensure_platform_int(indexer)
+ new_index = long_index.take(indexer)
+ new_values = lp.values.take(indexer, axis=0)
- new_index = long_index.take(indexer)
- new_values = lp.values.take(indexer, axis=0)
+ lp = DataFrame(new_values, index=new_index, columns=lp.columns)
+ panels.append(lp.to_panel())
- lp = DataFrame(new_values, index=new_index, columns=lp.columns)
- wp = lp.to_panel()
+ # append the panels
+ wp = concat(panels, axis = 0, verify_integrity = True)
+
+ # reorder by any non_index_axes
+ for axis,labels in self.non_index_axes:
+ wp = wp.reindex_axis(labels,axis=axis,copy=False)
+
+ if self.selection.filter:
+ new_minor = sorted(set(wp.minor_axis) & self.selection.filter)
+ wp = wp.reindex(minor=new_minor, copy = False)
- if sel.column_filter:
- new_minor = sorted(set(wp.minor_axis) & sel.column_filter)
- wp = wp.reindex(minor=new_minor)
return wp
+ def write(self, axes_to_index, obj, append=False, compression=None,
+ complevel=None, min_itemsize = None, **kwargs):
+
+ # create the table if it doesn't exist (or get it if it does)
+ if not append:
+ if 'table' in self.group:
+ self.handle.removeNode(self.group, 'table')
+
+ # create the axes
+ self.create_axes(axes_to_index = axes_to_index, obj = obj, validate = append, min_itemsize = min_itemsize)
+
+ if 'table' not in self.group:
+
+ # create the table
+ options = self.create_description(compression = compression, complevel = complevel)
+
+ # set the table attributes
+ self.set_attrs()
+
+ # create the table
+ table = self.handle.createTable(self.group, **options)
+
+ else:
+
+ # the table must already exist
+ table = self.table
+
+ # validate the table
+ self.validate()
+
+ # validate the axes and set the kinds
+ for a in self.axes:
+ a.validate_and_set(table, append)
+
+ # add the rows
+ self._write_data()
+ self.handle.flush()
+
+ def _write_data(self):
+ """ fast writing of data: requires specific cython routines each axis shape """
+
+ masks = []
+
+ # create the masks
+ for a in self.values_axes:
+
+ # figure the mask: only do if we can successfully process this column, otherwise ignore the mask
+ try:
+ mask = np.isnan(a.data).all(axis=0)
+ masks.append(mask.astype('u1'))
+ except:
+
+ # need to check for Nan in a non-numeric type column!!!
+ masks.append(np.zeros((a.data.shape[1:]), dtype = 'u1'))
+
+ # consolidate masks
+ mask = masks[0]
+ for m in masks[1:]:
+ m = mask & m
+
+ # the arguments & values
+ args = [ a.cvalues for a in self.index_axes ]
+ values = [ a.data for a in self.values_axes ]
+
+ # get our function
+ try:
+ func = getattr(pylib,"create_hdf_rows_%sd" % self.ndim)
+ args.append(mask)
+ args.append(values)
+ rows = func(*args)
+ if len(rows):
+ self.table.append(rows)
+ except (Exception), detail:
+ raise Exception("tables cannot write this data -> %s" % str(detail))
+
+ def delete(self, where = None):
+ if where is None:
+ return super(LegacyTable, self).delete()
- def _delete_from_table(self, group, where = None):
- table = getattr(group, 'table')
+ # infer the data kind
+ table = self.table
+ self.infer_axes()
# create the selection
- s = Selection(table, where, table._v_attrs.index_kind)
- s.select_coords()
+ self.selection = Selection(self, where)
+ self.selection.select_coords()
# delete the rows in reverse order
- l = list(s.values)
- l.reverse()
- for c in l:
- table.removeRows(c)
- self.handle.flush()
- return len(s.values)
+ l = list(self.selection.values)
+ ln = len(l)
+
+ if ln:
+
+ # if we can do a consecutive removal - do it!
+ if l[0]+ln-1 == l[-1]:
+ table.removeRows(start = l[0], stop = l[-1]+1)
+
+ # one by one
+ else:
+ l.reverse()
+ for c in l:
+ table.removeRows(c)
+
+ self.handle.flush()
+
+ # return the number of rows removed
+ return ln
+
+
+class LegacyFrameTable(LegacyTable):
+ """ support the legacy frame table """
+ table_type = 'legacy_frame'
+ def read(self, *args, **kwargs):
+ return super(LegacyFrameTable, self).read(*args, **kwargs)['value']
+
+class LegacyPanelTable(LegacyTable):
+ """ support the legacy panel table """
+ table_type = 'legacy_panel'
+
+class AppendableTable(LegacyTable):
+ """ suppor the new appendable table formats """
+ _indexables = None
+ table_type = 'appendable'
+
+class AppendableFrameTable(AppendableTable):
+ """ suppor the new appendable table formats """
+ table_type = 'appendable_frame'
+ ndim = 2
+
+ def read(self, where=None):
+
+ self.read_axes(where)
+
+ index = Index(self.index_axes[0].values)
+ frames = []
+ for a in self.values_axes:
+ columns = Index(a.values)
+ block = make_block(a.cvalues.T, columns, columns)
+ mgr = BlockManager([ block ], [ columns, index ])
+ frames.append(DataFrame(mgr))
+ df = concat(frames, axis = 1, verify_integrity = True)
+
+ # sort the indicies & reorder the columns
+ for axis,labels in self.non_index_axes:
+ df = df.reindex_axis(labels,axis=axis,copy=False)
+ columns_ordered = df.columns
+
+ # apply the column filter (but keep columns in the same order)
+ if self.selection.filter:
+ columns = Index(set(columns_ordered) & self.selection.filter)
+ columns = sorted(columns_ordered.get_indexer(columns))
+ df = df.reindex(columns = columns_ordered.take(columns), copy = False)
+
+ else:
+ df = df.reindex(columns = columns_ordered, copy = False)
+
+ return df
+
+class AppendablePanelTable(AppendableTable):
+ """ suppor the new appendable table formats """
+ table_type = 'appendable_panel'
+ ndim = 3
+
+# table maps
+_TABLE_MAP = {
+ 'appendable_frame' : AppendableFrameTable,
+ 'appendable_panel' : AppendablePanelTable,
+ 'worm' : WORMTable,
+ 'legacy_frame' : LegacyFrameTable,
+ 'legacy_panel' : LegacyPanelTable,
+ 'default' : AppendablePanelTable,
+}
+
+def create_table(parent, group, typ = None, **kwargs):
+ """ return a suitable Table class to operate """
+
+ pt = getattr(group._v_attrs,'pandas_type',None)
+ tt = getattr(group._v_attrs,'table_type',None)
+
+ # a new node
+ if pt is None:
+
+ return (_TABLE_MAP.get(typ) or _TABLE_MAP.get('default'))(parent, group, **kwargs)
+
+ # existing node (legacy)
+ if tt is None:
+
+ # distiguish between a frame/table
+ tt = 'legacy_panel'
+ try:
+ if group.table.description.values.shape[0] == 1:
+ tt = 'legacy_frame'
+ except:
+ pass
+
+ return _TABLE_MAP.get(tt)(parent, group, **kwargs)
def _convert_index(index):
if isinstance(index, DatetimeIndex):
converted = index.asi8
- return converted, 'datetime64', _tables().Int64Col()
+ return Col(converted, 'datetime64', _tables().Int64Col())
elif isinstance(index, (Int64Index, PeriodIndex)):
atom = _tables().Int64Col()
- return index.values, 'integer', atom
+ return Col(index.values, 'integer', atom)
if isinstance(index, MultiIndex):
raise Exception('MultiIndex not supported here!')
@@ -957,36 +1588,36 @@ def _convert_index(index):
if inferred_type == 'datetime64':
converted = values.view('i8')
- return converted, 'datetime64', _tables().Int64Col()
+ return Col(converted, 'datetime64', _tables().Int64Col())
elif inferred_type == 'datetime':
converted = np.array([(time.mktime(v.timetuple()) +
v.microsecond / 1E6) for v in values],
dtype=np.float64)
- return converted, 'datetime', _tables().Time64Col()
+ return Col(converted, 'datetime', _tables().Time64Col())
elif inferred_type == 'date':
converted = np.array([time.mktime(v.timetuple()) for v in values],
dtype=np.int32)
- return converted, 'date', _tables().Time32Col()
+ return Col(converted, 'date', _tables().Time32Col())
elif inferred_type == 'string':
# atom = _tables().ObjectAtom()
# return np.asarray(values, dtype='O'), 'object', atom
converted = np.array(list(values), dtype=np.str_)
itemsize = converted.dtype.itemsize
- return converted, 'string', _tables().StringCol(itemsize)
+ return Col(converted, 'string', _tables().StringCol(itemsize), itemsize = itemsize)
elif inferred_type == 'unicode':
atom = _tables().ObjectAtom()
- return np.asarray(values, dtype='O'), 'object', atom
+ return Col(np.asarray(values, dtype='O'), 'object', atom)
elif inferred_type == 'integer':
# take a guess for now, hope the values fit
atom = _tables().Int64Col()
- return np.asarray(values, dtype=np.int64), 'integer', atom
+ return Col(np.asarray(values, dtype=np.int64), 'integer', atom)
elif inferred_type == 'floating':
atom = _tables().Float64Col()
- return np.asarray(values, dtype=np.float64), 'float', atom
+ return Col(np.asarray(values, dtype=np.float64), 'float', atom)
else: # pragma: no cover
atom = _tables().ObjectAtom()
- return np.asarray(values, dtype='O'), 'object', atom
+ return Col(np.asarray(values, dtype='O'), 'object', atom)
def _read_array(group, key):
@@ -1093,87 +1724,225 @@ def _alias_to_class(alias):
return _reverse_index_map.get(alias, Index)
+class Term(object):
+ """ create a term object that holds a field, op, and value
+
+ Parameters
+ ----------
+ field : dict, string term expression, or the field to operate (must be a valid index/column type of DataFrame/Panel)
+ op : a valid op (defaults to '=') (optional)
+ >, >=, <, <=, =, != (not equal) are allowed
+ value : a value or list of values (required)
+ kinds : the kinds map (dict of column name -> kind)
+
+ Returns
+ -------
+ a Term object
+
+ Examples
+ --------
+ Term(dict(field = 'index', op = '>', value = '20121114'))
+ Term('index', '20121114')
+ Term('index', '>', '20121114')
+ Term('index', ['20121114','20121114'])
+ Term('index', datetime(2012,11,14))
+ Term('major>20121114')
+ Term('minor', ['A','B'])
+
+ """
+
+ _ops = ['<=','<','>=','>','!=','=']
+ _search = re.compile("^(?P<field>\w+)(?P<op>%s)(?P<value>.+)$" % '|'.join(_ops))
+ _index = ['index','major_axis','major']
+ _column = ['column','minor_axis','minor']
+
+ def __init__(self, field, op = None, value = None, kinds = None):
+ self.field = None
+ self.op = None
+ self.value = None
+ self.kinds = kinds or dict()
+ self.filter = None
+ self.condition = None
+
+ # unpack lists/tuples in field
+ while(isinstance(field,(tuple,list))):
+ f = field
+ field = f[0]
+ if len(f) > 1:
+ op = f[1]
+ if len(f) > 2:
+ value = f[2]
+
+ # backwards compatible
+ if isinstance(field, dict):
+ self.field = field.get('field')
+ self.op = field.get('op') or '='
+ self.value = field.get('value')
+
+ # passed a term
+ elif isinstance(field,Term):
+ self.field = field.field
+ self.op = field.op
+ self.value = field.value
+
+ # a string expression (or just the field)
+ elif isinstance(field,basestring):
+
+ # is a term is passed
+ s = self._search.match(field)
+ if s is not None:
+ self.field = s.group('field')
+ self.op = s.group('op')
+ self.value = s.group('value')
+
+ else:
+ self.field = field
+
+ # is an op passed?
+ if isinstance(op, basestring) and op in self._ops:
+ self.op = op
+ self.value = value
+ else:
+ self.op = '='
+ self.value = op
+
+ else:
+ raise Exception("Term does not understand the supplied field [%s]" % field)
+
+ # we have valid fields
+ if self.field is None or self.op is None or self.value is None:
+ raise Exception("Could not create this term [%s]" % str(self))
+
+ # valid field name
+ if self.field in self._index:
+ self.field = 'index'
+ elif self.field in self._column:
+ self.field = 'column'
+ else:
+ raise Exception("field is not a valid index/column for this term [%s]" % str(self))
+
+ # we have valid conditions
+ if self.op in ['>','>=','<','<=']:
+ if hasattr(self.value,'__iter__') and len(self.value) > 1:
+ raise Exception("an inequality condition cannot have multiple values [%s]" % str(self))
+
+ if not hasattr(self.value,'__iter__'):
+ self.value = [ self.value ]
+
+ self.eval()
+
+ def __str__(self):
+ return "field->%s,op->%s,value->%s" % (self.field,self.op,self.value)
+
+ __repr__ = __str__
+
+ @property
+ def is_in_table(self):
+ """ return True if this is a valid column name for generation (e.g. an actual column in the table) """
+ return self.field in self.kinds
+
+ @property
+ def kind(self):
+ """ the kind of my field """
+ return self.kinds.get(self.field)
+
+ def eval(self):
+ """ set the numexpr expression for this term """
+
+ # convert values
+ values = [ self.convert_value(v) for v in self.value ]
+
+ # equality conditions
+ if self.op in ['=','!=']:
+
+ if self.is_in_table:
+
+ # too many values to create the expression?
+ if len(values) <= 61:
+ self.condition = "(%s)" % ' | '.join([ "(%s == %s)" % (self.field,v[0]) for v in values])
+
+ # use a filter after reading
+ else:
+ self.filter = set([ v[1] for v in values ])
+
+ else:
+
+ self.filter = set([ v[1] for v in values ])
+
+ else:
+
+ if self.is_in_table:
+
+ self.condition = '(%s %s %s)' % (self.field, self.op, values[0][0])
+
+ def convert_value(self, v):
+
+ if self.field == 'index':
+ if self.kind == 'datetime64' :
+ return [lib.Timestamp(v).value, None]
+ elif isinstance(v, datetime):
+ return [time.mktime(v.timetuple()), None]
+ elif not isinstance(v, basestring):
+ return [str(v), None]
+
+ # string quoting
+ return ["'" + v + "'", v]
+
class Selection(object):
"""
Carries out a selection operation on a tables.Table object.
Parameters
----------
- table : tables.Table
- where : list of dicts of the following form
-
- Comparison op
- {'field' : 'index',
- 'op' : '>=',
- 'value' : value}
-
- Match single value
- {'field' : 'index',
- 'value' : v1}
+ table : a Table object
+ where : list of Terms (or convertable to)
- Match a set of values
- {'field' : 'index',
- 'value' : [v1, v2, v3]}
"""
- def __init__(self, table, where=None, index_kind=None):
- self.table = table
- self.where = where
- self.index_kind = index_kind
- self.column_filter = None
- self.the_condition = None
- self.conditions = []
- self.values = None
- if where:
- self.generate(where)
+ def __init__(self, table, where=None):
+ self.table = table
+ self.where = where
+ self.values = None
+ self.condition = None
+ self.filter = None
+ self.terms = self.generate(where)
+
+ # create the numexpr & the filter
+ if self.terms:
+ conds = [ t.condition for t in self.terms if t.condition is not None ]
+ if len(conds):
+ self.condition = "(%s)" % ' & '.join(conds)
+ self.filter = set()
+ for t in self.terms:
+ if t.filter is not None:
+ self.filter |= t.filter
def generate(self, where):
- # and condictions
- for c in where:
- op = c.get('op', None)
- value = c['value']
- field = c['field']
-
- if field == 'index' and self.index_kind == 'datetime64':
- val = lib.Timestamp(value).value
- self.conditions.append('(%s %s %s)' % (field, op, val))
- elif field == 'index' and isinstance(value, datetime):
- value = time.mktime(value.timetuple())
- self.conditions.append('(%s %s %s)' % (field, op, value))
- else:
- self.generate_multiple_conditions(op, value, field)
-
- if len(self.conditions):
- self.the_condition = '(' + ' & '.join(self.conditions) + ')'
-
- def generate_multiple_conditions(self, op, value, field):
+ """ where can be a : dict,list,tuple,string """
+ if where is None: return None
- if op and op == 'in' or isinstance(value, (list, np.ndarray)):
- if len(value) <= 61:
- l = '(' + ' | '.join([ "(%s == '%s')" % (field, v)
- for v in value]) + ')'
- self.conditions.append(l)
- else:
- self.column_filter = set(value)
+ if not isinstance(where, (list,tuple)):
+ where = [ where ]
else:
- if op is None:
- op = '=='
- self.conditions.append('(%s %s "%s")' % (field, op, value))
+ # do we have all list/tuple
+ if not any([ isinstance(w, (list,tuple,Term)) for w in where ]):
+ where = [ where ]
+
+ return [ Term(c, kinds = self.table.kinds_map()) for c in where ]
def select(self):
"""
generate the selection
"""
- if self.the_condition:
- self.values = self.table.readWhere(self.the_condition)
-
+ if self.condition is not None:
+ self.values = self.table.table.readWhere(self.condition)
else:
- self.values = self.table.read()
+ self.values = self.table.table.read()
def select_coords(self):
"""
generate the selection
"""
- self.values = self.table.getWhereList(self.the_condition)
+ self.values = self.table.table.getWhereList(self.condition)
def _get_index_factory(klass):
diff --git a/pandas/io/tests/legacy_table.h5 b/pandas/io/tests/legacy_table.h5
new file mode 100644
index 0000000000000..1c90382d9125c
Binary files /dev/null and b/pandas/io/tests/legacy_table.h5 differ
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index afd05610e3427..0f7da8e827615 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -8,10 +8,11 @@
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, Index)
-from pandas.io.pytables import HDFStore, get_store
+from pandas.io.pytables import HDFStore, get_store, Term
import pandas.util.testing as tm
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
+from pandas import concat
try:
import tables
@@ -64,7 +65,9 @@ def test_repr(self):
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
+ self.store.append('e', tm.makePanel())
repr(self.store)
+ str(self.store)
def test_contains(self):
self.store['a'] = tm.makeTimeSeries()
@@ -139,10 +142,69 @@ def test_put_integer(self):
self._check_roundtrip(df, tm.assert_frame_equal)
def test_append(self):
+ pth = '__test_append__.h5'
+
+ try:
+ store = HDFStore(pth)
+
+ df = tm.makeTimeDataFrame()
+ store.append('df1', df[:10])
+ store.append('df1', df[10:])
+ tm.assert_frame_equal(store['df1'], df)
+
+ store.put('df2', df[:10], table=True)
+ store.append('df2', df[10:])
+ tm.assert_frame_equal(store['df2'], df)
+
+ wp = tm.makePanel()
+ store.append('wp1', wp.ix[:,:10,:])
+ store.append('wp1', wp.ix[:,10:,:])
+ tm.assert_panel_equal(store['wp1'], wp)
+
+ except:
+ raise
+ finally:
+ store.close()
+ os.remove(pth)
+
+ def test_append_with_strings(self):
+ wp = tm.makePanel()
+ wp2 = wp.rename_axis(dict([ (x,"%s_extra" % x) for x in wp.minor_axis ]), axis = 2)
+
+ self.store.append('s1', wp, min_itemsize = 20)
+ self.store.append('s1', wp2)
+ expected = concat([ wp, wp2], axis = 2)
+ expected = expected.reindex(minor_axis = sorted(expected.minor_axis))
+ tm.assert_panel_equal(self.store['s1'], expected)
+
+ # test truncation of bigger strings
+ self.store.append('s2', wp)
+ self.assertRaises(Exception, self.store.append, 's2', wp2)
+
+ def test_create_table_index(self):
+ wp = tm.makePanel()
+ self.store.append('p5', wp)
+ self.store.create_table_index('p5')
+
+ assert(self.store.handle.root.p5.table.cols.index.is_indexed == True)
+ assert(self.store.handle.root.p5.table.cols.column.is_indexed == False)
+
df = tm.makeTimeDataFrame()
- self.store.put('c', df[:10], table=True)
- self.store.append('c', df[10:])
- tm.assert_frame_equal(self.store['c'], df)
+ self.store.append('f', df[:10])
+ self.store.append('f', df[10:])
+ self.store.create_table_index('f')
+
+ # create twice
+ self.store.create_table_index('f')
+
+ # try to index a non-table
+ self.store.put('f2', df)
+ self.assertRaises(Exception, self.store.create_table_index, 'f2')
+
+ # try to change the version supports flag
+ from pandas.io import pytables
+ pytables._table_supports_index = False
+ self.assertRaises(Exception, self.store.create_table_index, 'f')
def test_append_diff_item_order(self):
wp = tm.makePanel()
@@ -153,7 +215,7 @@ def test_append_diff_item_order(self):
self.assertRaises(Exception, self.store.put, 'panel', wp2,
append=True)
- def test_append_incompatible_dtypes(self):
+ def test_table_index_incompatible_dtypes(self):
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index=date_range('1/1/2000', periods=3))
@@ -162,6 +224,51 @@ def test_append_incompatible_dtypes(self):
self.assertRaises(Exception, self.store.put, 'frame', df2,
table=True, append=True)
+ def test_table_values_dtypes_roundtrip(self):
+ df1 = DataFrame({'a': [1, 2, 3]}, dtype = 'f8')
+ self.store.append('df1', df1)
+ assert df1.dtypes == self.store['df1'].dtypes
+
+ df2 = DataFrame({'a': [1, 2, 3]}, dtype = 'i8')
+ self.store.append('df2', df2)
+ assert df2.dtypes == self.store['df2'].dtypes
+
+ # incompatible dtype
+ self.assertRaises(Exception, self.store.append, 'df2', df1)
+
+ def test_table_mixed_dtypes(self):
+
+ # frame
+ def _make_one_df():
+ df = tm.makeDataFrame()
+ df['obj1'] = 'foo'
+ df['obj2'] = 'bar'
+ df['bool1'] = df['A'] > 0
+ df['bool2'] = df['B'] > 0
+ df['int1'] = 1
+ df['int2'] = 2
+ return df.consolidate()
+
+ df1 = _make_one_df()
+
+ self.store.append('df1_mixed', df1)
+ tm.assert_frame_equal(self.store.select('df1_mixed'), df1)
+
+ # panel
+ def _make_one_panel():
+ wp = tm.makePanel()
+ wp['obj1'] = 'foo'
+ wp['obj2'] = 'bar'
+ wp['bool1'] = wp['ItemA'] > 0
+ wp['bool2'] = wp['ItemB'] > 0
+ wp['int1'] = 1
+ wp['int2'] = 2
+ return wp.consolidate()
+ p1 = _make_one_panel()
+
+ self.store.append('p1_mixed', p1)
+ tm.assert_panel_equal(self.store.select('p1_mixed'), p1)
+
def test_remove(self):
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
@@ -174,34 +281,116 @@ def test_remove(self):
self.store.remove('b')
self.assertEquals(len(self.store), 0)
- def test_remove_where_not_exist(self):
- crit1 = {
- 'field' : 'index',
- 'op' : '>',
- 'value' : 'foo'
- }
+ # __delitem__
+ self.store['a'] = ts
+ self.store['b'] = df
+ del self.store['a']
+ del self.store['b']
+ self.assertEquals(len(self.store), 0)
+
+ def test_remove_where(self):
+
+ # non-existance
+ crit1 = Term('index','>','foo')
self.store.remove('a', where=[crit1])
+ # try to remove non-table (with crit)
+ # non-table ok (where = None)
+ wp = tm.makePanel()
+ self.store.put('wp', wp, table=True)
+ self.store.remove('wp', [('column', ['A', 'D'])])
+ rs = self.store.select('wp')
+ expected = wp.reindex(minor_axis = ['B','C'])
+ tm.assert_panel_equal(rs,expected)
+
+ # selectin non-table with a where
+ self.store.put('wp2', wp, table=False)
+ self.assertRaises(Exception, self.store.remove,
+ 'wp2', [('column', ['A', 'D'])])
+
+
def test_remove_crit(self):
wp = tm.makePanel()
self.store.put('wp', wp, table=True)
date = wp.major_axis[len(wp.major_axis) // 2]
- crit1 = {
- 'field' : 'index',
- 'op' : '>',
- 'value' : date
- }
- crit2 = {
- 'field' : 'column',
- 'value' : ['A', 'D']
- }
+ crit1 = Term('index','>',date)
+ crit2 = Term('column',['A', 'D'])
self.store.remove('wp', where=[crit1])
self.store.remove('wp', where=[crit2])
result = self.store['wp']
expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
tm.assert_panel_equal(result, expected)
+ # test non-consecutive row removal
+ wp = tm.makePanel()
+ self.store.put('wp2', wp, table=True)
+
+ date1 = wp.major_axis[1:3]
+ date2 = wp.major_axis[5]
+ date3 = [wp.major_axis[7],wp.major_axis[9]]
+
+ crit1 = Term('index',date1)
+ crit2 = Term('index',date2)
+ crit3 = Term('index',date3)
+
+ self.store.remove('wp2', where=[crit1])
+ self.store.remove('wp2', where=[crit2])
+ self.store.remove('wp2', where=[crit3])
+ result = self.store['wp2']
+
+ ma = list(wp.major_axis)
+ for d in date1:
+ ma.remove(d)
+ ma.remove(date2)
+ for d in date3:
+ ma.remove(d)
+ expected = wp.reindex(major = ma)
+ tm.assert_panel_equal(result, expected)
+
+ def test_terms(self):
+
+ wp = tm.makePanel()
+ self.store.put('wp', wp, table=True)
+
+ # some invalid terms
+ terms = [
+ [ 'minor', ['A','B'] ],
+ [ 'index', ['20121114'] ],
+ [ 'index', ['20121114', '20121114'] ],
+ ]
+ for t in terms:
+ self.assertRaises(Exception, self.store.select, 'wp', t)
+
+ self.assertRaises(Exception, Term.__init__)
+ self.assertRaises(Exception, Term.__init__, 'blah')
+ self.assertRaises(Exception, Term.__init__, 'index')
+ self.assertRaises(Exception, Term.__init__, 'index', '==')
+ self.assertRaises(Exception, Term.__init__, 'index', '>', 5)
+
+ result = self.store.select('wp',[ Term('major_axis<20000108'), Term('minor_axis', '=', ['A','B']) ])
+ expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
+ tm.assert_panel_equal(result, expected)
+
+ # valid terms
+ terms = [
+ dict(field = 'index', op = '>', value = '20121114'),
+ ('index', '20121114'),
+ ('index', '>', '20121114'),
+ (('index', ['20121114','20121114']),),
+ ('index', datetime(2012,11,14)),
+ 'index>20121114',
+ 'major>20121114',
+ 'major_axis>20121114',
+ (('minor', ['A','B']),),
+ (('minor_axis', ['A','B']),),
+ ((('minor_axis', ['A','B']),),),
+ (('column', ['A','B']),),
+ ]
+
+ for t in terms:
+ self.store.select('wp', t)
+
def test_series(self):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal)
@@ -461,10 +650,6 @@ def _make_one():
self.store['obj'] = df2
tm.assert_frame_equal(self.store['obj'], df2)
- # storing in Table not yet supported
- self.assertRaises(Exception, self.store.put, 'foo',
- df1, table=True)
-
# check that can store Series of all of these types
self._check_roundtrip(df1['obj1'], tm.assert_series_equal)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal)
@@ -521,43 +706,45 @@ def test_overwrite_node(self):
tm.assert_series_equal(self.store['a'], ts)
+ def test_select(self):
+ wp = tm.makePanel()
+
+ # put/select ok
+ self.store.put('wp', wp, table=True)
+ self.store.select('wp')
+
+ # non-table ok (where = None)
+ self.store.put('wp2', wp, table=False)
+ self.store.select('wp2')
+
+ # selectin non-table with a where
+ self.assertRaises(Exception, self.store.select,
+ 'wp2', ('column', ['A', 'D']))
+
def test_panel_select(self):
wp = tm.makePanel()
self.store.put('wp', wp, table=True)
date = wp.major_axis[len(wp.major_axis) // 2]
- crit1 = {
- 'field' : 'index',
- 'op' : '>=',
- 'value' : date
- }
- crit2 = {
- 'field' : 'column',
- 'value' : ['A', 'D']
- }
+ crit1 = ('index','>=',date)
+ crit2 = ('column', '=', ['A', 'D'])
result = self.store.select('wp', [crit1, crit2])
expected = wp.truncate(before=date).reindex(minor=['A', 'D'])
tm.assert_panel_equal(result, expected)
+ result = self.store.select('wp', [ 'major_axis>=20000124', ('minor_axis', '=', ['A','B']) ])
+ expected = wp.truncate(before='20000124').reindex(minor=['A', 'B'])
+ tm.assert_panel_equal(result, expected)
+
def test_frame_select(self):
df = tm.makeTimeDataFrame()
self.store.put('frame', df, table=True)
date = df.index[len(df) // 2]
- crit1 = {
- 'field' : 'index',
- 'op' : '>=',
- 'value' : date
- }
- crit2 = {
- 'field' : 'column',
- 'value' : ['A', 'D']
- }
- crit3 = {
- 'field' : 'column',
- 'value' : 'A'
- }
+ crit1 = ('index','>=',date)
+ crit2 = ('column',['A', 'D'])
+ crit3 = ('column','A')
result = self.store.select('frame', [crit1, crit2])
expected = df.ix[date:, ['A', 'D']]
@@ -578,10 +765,7 @@ def test_select_filter_corner(self):
df.columns = ['%.3d' % c for c in df.columns]
self.store.put('frame', df, table=True)
- crit = {
- 'field' : 'column',
- 'value' : df.columns[:75]
- }
+ crit = Term('column', df.columns[:75])
result = self.store.select('frame', [crit])
tm.assert_frame_equal(result, df.ix[:, df.columns[:75]])
@@ -641,6 +825,15 @@ def test_legacy_read(self):
store['d']
store.close()
+ def test_legacy_table_read(self):
+ # legacy table types
+ pth = curpath()
+ store = HDFStore(os.path.join(pth, 'legacy_table.h5'), 'r')
+ store.select('df1')
+ store.select('df2')
+ store.select('wp1')
+ store.close()
+
def test_store_datetime_fractional_secs(self):
dt = datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
diff --git a/pandas/src/pytables.pyx b/pandas/src/pytables.pyx
new file mode 100644
index 0000000000000..b4dc4f5995f71
--- /dev/null
+++ b/pandas/src/pytables.pyx
@@ -0,0 +1,97 @@
+### pytables extensions ###
+
+from numpy cimport ndarray, int32_t, float64_t, int64_t
+cimport numpy as np
+
+cimport cython
+
+import numpy as np
+import operator
+import sys
+
+np.import_array()
+np.import_ufunc()
+
+
+from cpython cimport (PyDict_New, PyDict_GetItem, PyDict_SetItem,
+ PyDict_Contains, PyDict_Keys,
+ Py_INCREF, PyTuple_SET_ITEM,
+ PyTuple_SetItem,
+ PyTuple_New,
+ PyObject_SetAttrString)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def create_hdf_rows_2d(ndarray index, ndarray[np.uint8_t, ndim=1] mask, list values):
+ """ return a list of objects ready to be converted to rec-array format """
+
+ cdef:
+ unsigned int i, b, n_index, n_blocks, tup_size
+ ndarray v
+ list l
+ object tup, val
+
+ n_index = index.shape[0]
+ n_blocks = len(values)
+ tup_size = n_blocks+1
+ l = []
+ for i from 0 <= i < n_index:
+
+ if not mask[i]:
+
+ tup = PyTuple_New(tup_size)
+ val = index[i]
+ PyTuple_SET_ITEM(tup, 0, val)
+ Py_INCREF(val)
+
+ for b from 0 <= b < n_blocks:
+
+ v = values[b][:, i]
+ PyTuple_SET_ITEM(tup, b+1, v)
+ Py_INCREF(v)
+
+ l.append(tup)
+
+ return l
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def create_hdf_rows_3d(ndarray index, ndarray columns, ndarray[np.uint8_t, ndim=2] mask, list values):
+ """ return a list of objects ready to be converted to rec-array format """
+
+ cdef:
+ unsigned int i, j, n_columns, n_index, n_blocks, tup_size
+ ndarray v
+ list l
+ object tup, val
+
+ n_index = index.shape[0]
+ n_columns = columns.shape[0]
+ n_blocks = len(values)
+ tup_size = n_blocks+2
+ l = []
+ for i from 0 <= i < n_index:
+
+ for c from 0 <= c < n_columns:
+
+ if not mask[i, c]:
+
+ tup = PyTuple_New(tup_size)
+
+ val = columns[c]
+ PyTuple_SET_ITEM(tup, 0, val)
+ Py_INCREF(val)
+
+ val = index[i]
+ PyTuple_SET_ITEM(tup, 1, val)
+ Py_INCREF(val)
+
+ for b from 0 <= b < n_blocks:
+
+ v = values[b][:, i, c]
+ PyTuple_SET_ITEM(tup, b+2, v)
+ Py_INCREF(v)
+
+ l.append(tup)
+
+ return l
diff --git a/setup.py b/setup.py
index e31659b3ee15f..ca152588b9554 100755
--- a/setup.py
+++ b/setup.py
@@ -620,6 +620,11 @@ def srcpath(name=None, suffix='.pyx', subdir='src'):
sources=[srcpath('sandbox', suffix=suffix)],
include_dirs=common_include)
+pytables_ext = Extension('pandas._pytables',
+ sources=[srcpath('pytables', suffix=suffix)],
+ include_dirs=[np.get_include()],
+ libraries=libraries)
+
cppsandbox_ext = Extension('pandas._cppsandbox',
language='c++',
sources=[srcpath('cppsandbox', suffix=suffix)],
@@ -629,6 +634,7 @@ def srcpath(name=None, suffix='.pyx', subdir='src'):
lib_ext,
period_ext,
sparse_ext,
+ pytables_ext,
parser_ext]
# if not ISRELEASED:
| Refactor of PyTables support to allow multiple table types.
This commit allows for support of multiple table types in a pytables hdf file,
supporting the existing infrastructure in a backwards compatible manner (LegacyTable)
while extending to a slightly modified format to support AppendableTables and future support of WORMTables
AppendableTables are implementations of the current table format with two enhancements:
- mixed dtype support
- writing routines in cython for enhanced performance
WORMTables (not implemented - but pretty straightforward)
these tables can support a fixed 'table' (meaning not-appendable), that is searchable via queries
this would have greatly enhanced write performance compared with AppendableTables, and a similar read performance profile
In addition, the tables allow for arbitrary axes to be indexed (e.g. you could save a panel that allows indexing on major_axis,minor_axis AND items),
so all dimensions are queryable (currently only major/minor axes allow this query)
all tests pass (with 1 exception)
a frame table round-trip - will fail on a comparison of a sorted index of the frame vs the index of the table (which is as written), not sure why this should be the case?
| https://api.github.com/repos/pandas-dev/pandas/pulls/2371 | 2012-11-27T21:30:00Z | 2012-11-29T00:40:18Z | 2012-11-29T00:40:18Z | 2012-11-29T16:47:29Z |
Excelfancy | diff --git a/pandas/core/format.py b/pandas/core/format.py
index d13cee0b24da2..db50955c13c3e 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -17,6 +17,9 @@
import numpy as np
+import itertools
+
+
docstring_to_string = """
Parameters
----------
@@ -400,6 +403,7 @@ def _get_column_name_list(self):
names.append('' if columns.name is None else columns.name)
return names
+
class HTMLFormatter(object):
indent_delta = 2
@@ -674,6 +678,217 @@ def grouper(x):
return result
+
+#from collections import namedtuple
+# ExcelCell = namedtuple("ExcelCell",
+# 'row, col, val, style, mergestart, mergeend')
+
+class ExcelCell:
+ __fields__ = ('row', 'col', 'val', 'style', 'mergestart', 'mergeend')
+ __slots__ = __fields__
+
+ def __init__(self, row, col, val,
+ style=None, mergestart=None, mergeend=None):
+ self.row = row
+ self.col = col
+ self.val = val
+ self.style = style
+ self.mergestart = mergestart
+ self.mergeend = mergeend
+
+
+header_style = {"font": {"bold": True},
+ "borders": {"top": "thin",
+ "right": "thin",
+ "bottom": "thin",
+ "left": "thin"},
+ "alignment": {"horizontal": "center"}}
+
+
+class ExcelFormatter(object):
+ """
+ Class for formatting a DataFrame to a list of ExcelCells,
+
+ Parameters
+ ----------
+ df : dataframe
+ na_rep: na representation
+ float_format : string, default None
+ Format string for floating point numbers
+ cols : sequence, optional
+ Columns to write
+ header : boolean or list of string, default True
+ Write out column names. If a list of string is given it is
+ assumed to be aliases for the column names
+ index : boolean, default True
+ output row names (index)
+ index_label : string or sequence, default None
+ Column label for index column(s) if desired. If None is given, and
+ `header` and `index` are True, then the index names are used. A
+ sequence should be given if the DataFrame uses MultiIndex.
+ """
+
+ def __init__(self,
+ df,
+ na_rep='',
+ float_format=None,
+ cols=None,
+ header=True,
+ index=True,
+ index_label=None
+ ):
+ self.df = df
+ self.rowcounter = 0
+ self.na_rep = na_rep
+ self.columns = cols
+ if cols is None:
+ self.columns = df.columns
+ self.float_format = float_format
+ self.index = index
+ self.index_label = index_label
+ self.header = header
+
+ def _format_value(self, val):
+ if lib.checknull(val):
+ val = self.na_rep
+ if self.float_format is not None and com.is_float(val):
+ val = float(self.float_format % val)
+ return val
+
+ def _format_header_mi(self):
+ levels = self.columns.format(sparsify=True, adjoin=False,
+ names=False)
+ level_lenghts = _get_level_lengths(levels)
+ coloffset = 0
+ if isinstance(self.df.index, MultiIndex):
+ coloffset = len(self.df.index[0]) - 1
+
+ for lnum, (records, values) in enumerate(zip(level_lenghts,
+ levels)):
+ name = self.columns.names[lnum]
+ yield ExcelCell(lnum, coloffset, name, header_style)
+ for i in records:
+ if records[i] > 1:
+ yield ExcelCell(lnum,coloffset + i + 1, values[i],
+ header_style, lnum, coloffset + i + records[i])
+ else:
+ yield ExcelCell(lnum, coloffset + i + 1, values[i], header_style)
+
+ self.rowcounter = lnum
+
+ def _format_header_regular(self):
+ has_aliases = isinstance(self.header, (tuple, list, np.ndarray))
+ if has_aliases or self.header:
+ coloffset = 0
+ if self.index:
+ coloffset = 1
+ if isinstance(self.df.index, MultiIndex):
+ coloffset = len(self.df.index[0])
+
+ colnames = self.columns
+ if has_aliases:
+ if len(self.header) != len(self.columns):
+ raise ValueError(('Writing %d cols but got %d aliases'
+ % (len(self.columns), len(self.header))))
+ else:
+ colnames = self.header
+
+ for colindex, colname in enumerate(colnames):
+ yield ExcelCell(self.rowcounter, colindex + coloffset, colname,
+ header_style)
+
+ def _format_header(self):
+ if isinstance(self.columns, MultiIndex):
+ gen = self._format_header_mi()
+ else:
+ gen = self._format_header_regular()
+
+ gen2 = ()
+ if self.df.index.names:
+ row = [x if x is not None else ''
+ for x in self.df.index.names] + [''] * len(self.columns)
+ if reduce(lambda x, y: x and y, map(lambda x: x != '', row)):
+ gen2 = (ExcelCell(self.rowcounter, colindex, val, header_style)
+ for colindex, val in enumerate(row))
+ self.rowcounter += 1
+ return itertools.chain(gen, gen2)
+
+ def _format_body(self):
+
+ if isinstance(self.df.index, MultiIndex):
+ return self._format_hierarchical_rows()
+ else:
+ return self._format_regular_rows()
+
+ def _format_regular_rows(self):
+ self.rowcounter += 1
+
+ coloffset = 0
+ #output index and index_label?
+ if self.index:
+ #chek aliases
+ #if list only take first as this is not a MultiIndex
+ if self.index_label and isinstance(self.index_label,
+ (list, tuple, np.ndarray)):
+ index_label = self.index_label[0]
+ #if string good to go
+ elif self.index_label and isinstance(self.index_label, str):
+ index_label = self.index_label
+ else:
+ index_label = self.df.index.names[0]
+
+ if index_label:
+ yield ExcelCell(self.rowcounter, 0,
+ index_label, header_style)
+ self.rowcounter += 1
+
+ #write index_values
+ index_values = self.df.index
+ coloffset = 1
+ for idx, idxval in enumerate(index_values):
+ yield ExcelCell(self.rowcounter + idx, 0, idxval, header_style)
+
+ for colidx, colname in enumerate(self.columns):
+ series = self.df[colname]
+ for i, val in enumerate(series):
+ yield ExcelCell(self.rowcounter + i, colidx + coloffset, val)
+
+ def _format_hierarchical_rows(self):
+ self.rowcounter += 1
+
+ gcolidx = 0
+ #output index and index_label?
+ if self.index:
+ index_labels = self.df.index.names
+ #check for aliases
+ if self.index_label and isinstance(self.index_label,
+ (list, tuple, np.ndarray)):
+ index_labels = self.index_label
+
+ #if index labels are not empty go ahead and dump
+ if filter(lambda x: x is not None, index_labels):
+ for cidx, name in enumerate(index_labels):
+ yield ExcelCell(self.rowcounter, cidx,
+ name, header_style)
+ self.rowcounter += 1
+
+ for indexcolvals in zip(*self.df.index):
+ for idx, indexcolval in enumerate(indexcolvals):
+ yield ExcelCell(self.rowcounter + idx, gcolidx,
+ indexcolval, header_style)
+ gcolidx += 1
+
+ for colidx, colname in enumerate(self.columns):
+ series = self.df[colname]
+ for i, val in enumerate(series):
+ yield ExcelCell(self.rowcounter + i, gcolidx + colidx, val)
+
+ def get_formatted_cells(self):
+ for cell in itertools.chain(self._format_header(),
+ self._format_body()):
+ cell.val = self._format_value(cell.val)
+ yield cell
+
#----------------------------------------------------------------------
# Array formatters
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 35d895bed43f1..ebe361a33b28c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1221,7 +1221,7 @@ def to_panel(self):
to_wide = deprecate('to_wide', to_panel)
- def _helper_csvexcel(self, writer, na_rep=None, cols=None,
+ def _helper_csv(self, writer, na_rep=None, cols=None,
header=True, index=True,
index_label=None, float_format=None):
if cols is None:
@@ -1356,7 +1356,7 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
else:
csvout = csv.writer(f, lineterminator='\n', delimiter=sep,
quoting=quoting)
- self._helper_csvexcel(csvout, na_rep=na_rep,
+ self._helper_csv(csvout, na_rep=na_rep,
float_format=float_format, cols=cols,
header=header, index=index,
index_label=index_label)
@@ -1367,7 +1367,7 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
def to_excel(self, excel_writer, sheet_name='sheet1', na_rep='',
float_format=None, cols=None, header=True, index=True,
- index_label=None):
+ index_label=None, startrow=0, startcol=0):
"""
Write DataFrame to a excel sheet
@@ -1392,6 +1392,9 @@ def to_excel(self, excel_writer, sheet_name='sheet1', na_rep='',
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
+ startow : upper left cell row to dump data frame
+ startcol : upper left cell column to dump data frame
+
Notes
-----
@@ -1408,11 +1411,17 @@ def to_excel(self, excel_writer, sheet_name='sheet1', na_rep='',
if isinstance(excel_writer, basestring):
excel_writer = ExcelWriter(excel_writer)
need_save = True
- excel_writer.cur_sheet = sheet_name
- self._helper_csvexcel(excel_writer, na_rep=na_rep,
- float_format=float_format, cols=cols,
- header=header, index=index,
- index_label=index_label)
+
+ formatter = fmt.ExcelFormatter(self,
+ na_rep=na_rep,
+ cols=cols,
+ header=header,
+ float_format=float_format,
+ index=index,
+ index_label=index_label)
+ formatted_cells = formatter.get_formatted_cells()
+ excel_writer.write_cells(formatted_cells, sheet_name,
+ startrow=startrow, startcol=startcol)
if need_save:
excel_writer.save()
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index a5fc7ebeed101..14a01b38ae88e 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -6,6 +6,7 @@
from itertools import izip
from urlparse import urlparse
import csv
+import xlwt
import numpy as np
@@ -20,6 +21,7 @@
import pandas.lib as lib
import pandas._parser as _parser
+from pandas.tseries.period import Period
class DateConversionError(Exception):
pass
@@ -456,6 +458,8 @@ def __init__(self, f, engine='python', **kwds):
# might mutate self.engine
self.options, self.engine = self._clean_options(options, engine)
+ if 'has_index_labels' in kwds:
+ self.options['has_index_labels'] = kwds['has_index_labels']
self._make_engine(self.engine)
@@ -931,6 +935,9 @@ def TextParser(*args, **kwds):
rows will be discarded
index_col : int or list, default None
Column or columns to use as the (possibly hierarchical) index
+ has_index_labels: boolean, default False
+ True if the cols defined in index_col have an index name and are
+ not in the header
na_values : iterable, default None
Custom NA values
keep_default_na : bool, default True
@@ -969,6 +976,10 @@ def TextParser(*args, **kwds):
# verbose=False, encoding=None, squeeze=False):
+def count_empty_vals(vals):
+ return sum([1 for v in vals if v == '' or v is None])
+
+
class PythonParser(ParserBase):
def __init__(self, f, **kwds):
@@ -995,6 +1006,9 @@ def __init__(self, f, **kwds):
self.doublequote = kwds['doublequote']
self.skipinitialspace = kwds['skipinitialspace']
self.quoting = kwds['quoting']
+ self.has_index_labels = False
+ if 'has_index_labels' in kwds:
+ self.has_index_labels = kwds['has_index_labels']
self.verbose = kwds['verbose']
self.converters = kwds['converters']
@@ -1099,6 +1113,13 @@ def read(self, rows=None):
self.index_col,
self.index_names)
+ #handle new style for names in index
+ count_empty_content_vals = count_empty_vals(content[0])
+ indexnamerow = None
+ if self.has_index_labels and count_empty_content_vals == len(columns):
+ indexnamerow = content[0]
+ content = content[1:]
+
alldata = self._rows_to_cols(content)
data = self._exclude_implicit_index(alldata)
@@ -1106,6 +1127,9 @@ def read(self, rows=None):
data = self._convert_data(data)
index = self._make_index(data, alldata, columns)
+ if indexnamerow:
+ coffset = len(indexnamerow) - len(columns)
+ index.names = indexnamerow[:coffset]
return index, columns, data
@@ -1699,7 +1723,7 @@ def __repr__(self):
return object.__repr__(self)
def parse(self, sheetname, header=0, skiprows=None, skip_footer=0,
- index_col=None, parse_cols=None, parse_dates=False,
+ index_col=None, has_index_labels=False, parse_cols=None, parse_dates=False,
date_parser=None, na_values=None, thousands=None, chunksize=None,
**kwds):
"""
@@ -1718,6 +1742,9 @@ def parse(self, sheetname, header=0, skiprows=None, skip_footer=0,
index_col : int, default None
Column to use as the row labels of the DataFrame. Pass None if
there is no such column
+ has_index_labels: boolean, default False
+ True if the cols defined in index_col have an index name and are
+ not in the header
parse_cols : int or list, default None
If None then parse all columns,
If int then indicates last column to be parsed
@@ -1739,6 +1766,7 @@ def parse(self, sheetname, header=0, skiprows=None, skip_footer=0,
False: self._parse_xls}
return choose[self.use_xlsx](sheetname, header=header,
skiprows=skiprows, index_col=index_col,
+ has_index_labels=has_index_labels,
parse_cols=parse_cols,
parse_dates=parse_dates,
date_parser=date_parser,
@@ -1780,7 +1808,7 @@ def _excel2num(x):
return i in parse_cols
def _parse_xlsx(self, sheetname, header=0, skiprows=None,
- skip_footer=0, index_col=None,
+ skip_footer=0, index_col=None, has_index_labels=False,
parse_cols=None, parse_dates=False, date_parser=None,
na_values=None, thousands=None, chunksize=None):
sheet = self.book.get_sheet_by_name(name=sheetname)
@@ -1804,6 +1832,7 @@ def _parse_xlsx(self, sheetname, header=0, skiprows=None,
data[header] = _trim_excel_header(data[header])
parser = TextParser(data, header=header, index_col=index_col,
+ has_index_labels=has_index_labels,
na_values=na_values,
thousands=thousands,
parse_dates=parse_dates,
@@ -1815,7 +1844,7 @@ def _parse_xlsx(self, sheetname, header=0, skiprows=None,
return parser.read()
def _parse_xls(self, sheetname, header=0, skiprows=None,
- skip_footer=0, index_col=None,
+ skip_footer=0, index_col=None, has_index_labels=None,
parse_cols=None, parse_dates=False, date_parser=None,
na_values=None, thousands=None, chunksize=None):
from xlrd import xldate_as_tuple, XL_CELL_DATE, XL_CELL_ERROR
@@ -1849,6 +1878,7 @@ def _parse_xls(self, sheetname, header=0, skiprows=None,
data[header] = _trim_excel_header(data[header])
parser = TextParser(data, header=header, index_col=index_col,
+ has_index_labels=has_index_labels,
na_values=na_values,
thousands=thousands,
parse_dates=parse_dates,
@@ -1869,11 +1899,97 @@ def sheet_names(self):
def _trim_excel_header(row):
# trim header row so auto-index inference works
- while len(row) > 0 and row[0] == '':
+ # xlrd uses '' , openpyxl None
+ while len(row) > 0 and (row[0] == '' or row[0] is None):
row = row[1:]
return row
+class CellStyleConverter(object):
+ """
+ Utility Class which converts a style dict to xlrd or openpyxl style
+ """
+
+ @staticmethod
+ def to_xls(style_dict):
+ """
+ converts a style_dict to an xlwt style object
+ Parameters
+ ----------
+ style_dict: style dictionary to convert
+ """
+ def style_to_xlwt(item, firstlevel=True, field_sep=',', line_sep=';'):
+ """helper wich recursively generate an xlwt easy style string
+ for example:
+
+ hstyle = {"font": {"bold": True},
+ "border": {"top": "thin",
+ "right": "thin",
+ "bottom": "thin",
+ "left": "thin"},
+ "align": {"horiz": "center"}}
+ will be converted to
+ font: bold on; \
+ border: top thin, right thin, bottom thin, left thin; \
+ align: horiz center;
+ """
+ if hasattr(item, 'items'):
+ if firstlevel:
+ it = ["%s: %s" % (key, style_to_xlwt(value, False))
+ for key, value in item.items()]
+ out = "%s " % (line_sep).join(it)
+ return out
+ else:
+ it = ["%s %s" % (key, style_to_xlwt(value, False))
+ for key, value in item.items()]
+ out = "%s " % (field_sep).join(it)
+ return out
+ else:
+ item = "%s" % item
+ item = item.replace("True", "on")
+ item = item.replace("False", "off")
+ return item
+
+ if style_dict:
+ xlwt_stylestr = style_to_xlwt(style_dict)
+ return xlwt.easyxf(xlwt_stylestr, field_sep=',', line_sep=';')
+ else:
+ return xlwt.XFStyle()
+
+ @staticmethod
+ def to_xlsx(style_dict):
+ """
+ converts a style_dict to an openpyxl style object
+ Parameters
+ ----------
+ style_dict: style dictionary to convert
+ """
+
+ from openpyxl.style import Style
+ xls_style = Style()
+ for key, value in style_dict.items():
+ for nk, nv in value.items():
+ if key == "borders":
+ (xls_style.borders.__getattribute__(nk)
+ .__setattr__('border_style', nv))
+ else:
+ xls_style.__getattribute__(key).__setattr__(nk, nv)
+
+ return xls_style
+
+
+def _conv_value(val):
+ #convert value for excel dump
+ if isinstance(val, np.int64):
+ val = int(val)
+ elif isinstance(val, np.bool8):
+ val = bool(val)
+ elif isinstance(val, Period):
+ val = "%s" % val
+
+ return val
+
+
class ExcelWriter(object):
"""
Class for writing DataFrame objects into excel sheets, uses xlwt for xls,
@@ -1890,11 +2006,15 @@ def __init__(self, path):
self.use_xlsx = False
import xlwt
self.book = xlwt.Workbook()
- self.fm_datetime = xlwt.easyxf(num_format_str='YYYY-MM-DD HH:MM:SS')
+ self.fm_datetime = xlwt.easyxf(
+ num_format_str='YYYY-MM-DD HH:MM:SS')
self.fm_date = xlwt.easyxf(num_format_str='YYYY-MM-DD')
else:
from openpyxl.workbook import Workbook
- self.book = Workbook(optimized_write=True)
+ self.book = Workbook()#optimized_write=True)
+ #open pyxl 1.6.1 adds a dummy sheet remove it
+ if self.book.worksheets:
+ self.book.remove_sheet(self.book.worksheets[0])
self.path = path
self.sheets = {}
self.cur_sheet = None
@@ -1905,16 +2025,18 @@ def save(self):
"""
self.book.save(self.path)
- def writerow(self, row, sheet_name=None):
+ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
"""
- Write the given row into Excel an excel sheet
+ Write given formated cells into Excel an excel sheet
Parameters
----------
- row : list
- Row of data to save to Excel sheet
+ cells : generator
+ cell of formated data to save to Excel sheet
sheet_name : string, default None
Name of Excel sheet, if None, then use self.cur_sheet
+ startrow: upper left cell row to dump data frame
+ startcol: upper left cell column to dump data frame
"""
if sheet_name is None:
sheet_name = self.cur_sheet
@@ -1922,49 +2044,69 @@ def writerow(self, row, sheet_name=None):
raise Exception('Must pass explicit sheet_name or set '
'cur_sheet property')
if self.use_xlsx:
- self._writerow_xlsx(row, sheet_name)
+ self._writecells_xlsx(cells, sheet_name, startrow, startcol)
else:
- self._writerow_xls(row, sheet_name)
+ self._writecells_xls(cells, sheet_name, startrow, startcol)
+
+ def _writecells_xlsx(self, cells, sheet_name, startrow, startcol):
+
+ from openpyxl.cell import get_column_letter
- def _writerow_xls(self, row, sheet_name):
if sheet_name in self.sheets:
- sheet, row_idx = self.sheets[sheet_name]
+ wks = self.sheets[sheet_name]
else:
- sheet = self.book.add_sheet(sheet_name)
- row_idx = 0
- sheetrow = sheet.row(row_idx)
- for i, val in enumerate(row):
- if isinstance(val, (datetime.datetime, datetime.date)):
- if isinstance(val, datetime.datetime):
- sheetrow.write(i, val, self.fm_datetime)
- else:
- sheetrow.write(i, val, self.fm_date)
- elif isinstance(val, np.int64):
- sheetrow.write(i, int(val))
- elif isinstance(val, np.bool8):
- sheetrow.write(i, bool(val))
- else:
- sheetrow.write(i, val)
- row_idx += 1
- if row_idx == 1000:
- sheet.flush_row_data()
- self.sheets[sheet_name] = (sheet, row_idx)
-
- def _writerow_xlsx(self, row, sheet_name):
+ wks = self.book.create_sheet()
+ wks.title = sheet_name
+ self.sheets[sheet_name] = wks
+
+ for cell in cells:
+ colletter = get_column_letter(startcol + cell.col + 1)
+ xcell = wks.cell("%s%s" % (colletter, startrow + cell.row + 1))
+ xcell.value = _conv_value(cell.val)
+ if cell.style:
+ style = CellStyleConverter.to_xlsx(cell.style)
+ for field in style.__fields__:
+ xcell.style.__setattr__(field,
+ style.__getattribute__(field))
+
+ if isinstance(cell.val, datetime.datetime):
+ xcell.style.number_format.format_code = "YYYY-MM-DD HH:MM:SS"
+ elif isinstance(cell.val, datetime.date):
+ xcell.style.number_format.format_code = "YYYY-MM-DD"
+
+ #merging requires openpyxl latest (works on 1.6.1)
+ #todo add version check
+ if cell.mergestart is not None and cell.mergeend is not None:
+ cletterstart = get_column_letter(startcol + cell.col + 1)
+ cletterend = get_column_letter(startcol + cell.mergeend + 1)
+
+ wks.merge_cells('%s%s:%s%s' % (cletterstart,
+ startrow + cell.row + 1,
+ cletterend,
+ startrow + cell.mergestart + 1))
+
+ def _writecells_xls(self, cells, sheet_name, startrow, startcol):
if sheet_name in self.sheets:
- sheet, row_idx = self.sheets[sheet_name]
+ wks = self.sheets[sheet_name]
else:
- sheet = self.book.create_sheet()
- sheet.title = sheet_name
- row_idx = 0
-
- conv_row = []
- for val in row:
- if isinstance(val, np.int64):
- val = int(val)
- elif isinstance(val, np.bool8):
- val = bool(val)
- conv_row.append(val)
- sheet.append(conv_row)
- row_idx += 1
- self.sheets[sheet_name] = (sheet, row_idx)
+ wks = self.book.add_sheet(sheet_name)
+ self.sheets[sheet_name] = wks
+
+ for cell in cells:
+ val = _conv_value(cell.val)
+ style = CellStyleConverter.to_xls(cell.style)
+ if isinstance(val, datetime.datetime):
+ style.num_format_str = "YYYY-MM-DD HH:MM:SS"
+ elif isinstance(val, datetime.date):
+ style.num_format_str = "YYYY-MM-DD"
+
+ if cell.mergestart is not None and cell.mergeend is not None:
+ wks.write_merge(startrow + cell.row,
+ startrow + cell.mergestart,
+ startcol + cell.col,
+ startcol + cell.mergeend,
+ val, style)
+ else:
+ wks.write(startrow + cell.row,
+ startcol + cell.col,
+ val, style)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index b76d9ea1e6052..61456d6dbfe2e 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -3842,7 +3842,7 @@ def test_to_excel_from_excel(self):
# test roundtrip
self.frame.to_excel(path,'test1')
reader = ExcelFile(path)
- recons = reader.parse('test1', index_col=0)
+ recons = reader.parse('test1', index_col=0, has_index_labels=True)
assert_frame_equal(self.frame, recons)
self.frame.to_excel(path,'test1', index=False)
@@ -3851,19 +3851,19 @@ def test_to_excel_from_excel(self):
recons.index = self.frame.index
assert_frame_equal(self.frame, recons)
- self.frame.to_excel(path,'test1')
- reader = ExcelFile(path)
- recons = reader.parse('test1', index_col=0, skiprows=[1])
- assert_frame_equal(self.frame.ix[1:], recons)
+ # self.frame.to_excel(path,'test1')
+ # reader = ExcelFile(path)
+ # recons = reader.parse('test1', index_col=0, skiprows=[2], has_index_labels=True)
+ # assert_frame_equal(self.frame.ix[1:], recons)
self.frame.to_excel(path,'test1',na_rep='NA')
reader = ExcelFile(path)
- recons = reader.parse('test1', index_col=0, na_values=['NA'])
+ recons = reader.parse('test1', index_col=0, na_values=['NA'], has_index_labels=True)
assert_frame_equal(self.frame, recons)
self.mixed_frame.to_excel(path,'test1')
reader = ExcelFile(path)
- recons = reader.parse('test1', index_col=0)
+ recons = reader.parse('test1', index_col=0, has_index_labels=True)
assert_frame_equal(self.mixed_frame, recons)
self.tsframe.to_excel(path, 'test1')
@@ -3891,7 +3891,7 @@ def test_to_excel_from_excel(self):
self.tsframe.to_excel(writer,'test2')
writer.save()
reader = ExcelFile(path)
- recons = reader.parse('test1',index_col=0)
+ recons = reader.parse('test1',index_col=0, has_index_labels=True)
assert_frame_equal(self.frame, recons)
recons = reader.parse('test2',index_col=0)
assert_frame_equal(self.tsframe, recons)
@@ -3903,11 +3903,46 @@ def test_to_excel_from_excel(self):
col_aliases = Index(['AA', 'X', 'Y', 'Z'])
self.frame2.to_excel(path, 'test1', header=col_aliases)
reader = ExcelFile(path)
- rs = reader.parse('test1', index_col=0)
+ rs = reader.parse('test1', index_col=0, has_index_labels=True)
xp = self.frame2.copy()
xp.columns = col_aliases
assert_frame_equal(xp, rs)
+ # test index_label
+ frame = (DataFrame(np.random.randn(10,2)) >= 0)
+ frame.to_excel(path, 'test1', index_label=['test'])
+ reader = ExcelFile(path)
+ recons = reader.parse('test1', index_col=0, has_index_labels=True).astype(np.int64)
+ frame.index.names = ['test']
+ self.assertEqual(frame.index.names, recons.index.names)
+
+ frame = (DataFrame(np.random.randn(10,2)) >= 0)
+ frame.to_excel(path, 'test1', index_label=['test', 'dummy', 'dummy2'])
+ reader = ExcelFile(path)
+ recons = reader.parse('test1', index_col=0, has_index_labels=True).astype(np.int64)
+ frame.index.names = ['test']
+ self.assertEqual(frame.index.names, recons.index.names)
+
+ frame = (DataFrame(np.random.randn(10,2)) >= 0)
+ frame.to_excel(path, 'test1', index_label='test')
+ reader = ExcelFile(path)
+ recons = reader.parse('test1', index_col=0, has_index_labels=True).astype(np.int64)
+ frame.index.names = ['test']
+ self.assertEqual(frame.index.names, recons.index.names)
+
+ #test index_labels in same row as column names
+ self.frame.to_excel('/tmp/tests.xls', 'test1', cols=['A', 'B', 'C', 'D'], index=False)
+ #take 'A' and 'B' as indexes (they are in same row as cols 'C', 'D')
+ df = self.frame.copy()
+ df = df.set_index(['A', 'B'])
+
+
+ reader = ExcelFile('/tmp/tests.xls')
+ recons = reader.parse('test1', index_col=[0, 1])
+ assert_frame_equal(df, recons)
+
+
+
os.remove(path)
# datetime.date, not sure what to test here exactly
@@ -3971,7 +4006,7 @@ def test_to_excel_multiindex(self):
# round trip
frame.to_excel(path, 'test1')
reader = ExcelFile(path)
- df = reader.parse('test1', index_col=[0,1], parse_dates=False)
+ df = reader.parse('test1', index_col=[0,1], parse_dates=False, has_index_labels=True)
assert_frame_equal(frame, df)
self.assertEqual(frame.index.names, df.index.names)
self.frame.index = old_index # needed if setUP becomes a classmethod
@@ -3984,7 +4019,7 @@ def test_to_excel_multiindex(self):
tsframe.to_excel(path, 'test1', index_label = ['time','foo'])
reader = ExcelFile(path)
- recons = reader.parse('test1', index_col=[0,1])
+ recons = reader.parse('test1', index_col=[0,1], has_index_labels=True)
assert_frame_equal(tsframe, recons)
# infer index
@@ -3993,22 +4028,28 @@ def test_to_excel_multiindex(self):
recons = reader.parse('test1')
assert_frame_equal(tsframe, recons)
- # no index
- tsframe.index.names = ['first', 'second']
- tsframe.to_excel(path, 'test1')
- reader = ExcelFile(path)
- recons = reader.parse('test1')
- assert_almost_equal(tsframe.values,
- recons.ix[:, tsframe.columns].values)
- self.assertEqual(len(tsframe.columns) + 2, len(recons.columns))
-
- tsframe.index.names = [None, None]
# no index
- tsframe.to_excel(path, 'test1', index=False)
- reader = ExcelFile(path)
- recons = reader.parse('test1', index_col=None)
- assert_almost_equal(recons.values, self.tsframe.values)
+ #TODO : mention this does not make sence anymore
+ #with the new formatting as we are not alligning colnames and indexlabels
+ #on the same row
+
+ # tsframe.index.names = ['first', 'second']
+ # tsframe.to_excel(path, 'test1')
+ # reader = ExcelFile(path)
+ # recons = reader.parse('test1')
+ # assert_almost_equal(tsframe.values,
+ # recons.ix[:, tsframe.columns].values)
+ # self.assertEqual(len(tsframe.columns) + 2, len(recons.columns))
+
+ # tsframe.index.names = [None, None]
+
+ # # no index
+ # tsframe.to_excel(path, 'test1', index=False)
+ # reader = ExcelFile(path)
+ # recons = reader.parse('test1', index_col=None)
+ # assert_almost_equal(recons.values, self.tsframe.values)
+
self.tsframe.index = old_index # needed if setUP becomes classmethod
# write a big DataFrame
@@ -4071,6 +4112,125 @@ def test_to_excel_unicode_filename(self):
assert_frame_equal(rs, xp)
os.remove(filename)
+ def test_to_excel_styleconverter(self):
+ from pandas.io.parsers import CellStyleConverter
+ try:
+ import xlwt
+ import openpyxl
+ except ImportError:
+ raise nose.SkipTest
+
+ hstyle = {"font": {"bold": True},
+ "borders": {"top": "thin",
+ "right": "thin",
+ "bottom": "thin",
+ "left": "thin"},
+ "alignment": {"horizontal": "center"}}
+ xls_style = CellStyleConverter.to_xls(hstyle)
+ self.assertTrue(xls_style.font.bold)
+ self.assertEquals(xlwt.Borders.THIN, xls_style.borders.top)
+ self.assertEquals(xlwt.Borders.THIN, xls_style.borders.right)
+ self.assertEquals(xlwt.Borders.THIN, xls_style.borders.bottom)
+ self.assertEquals(xlwt.Borders.THIN, xls_style.borders.left)
+ self.assertEquals(xlwt.Alignment.HORZ_CENTER, xls_style.alignment.horz)
+
+ xlsx_style = CellStyleConverter.to_xlsx(hstyle)
+ self.assertTrue(xlsx_style.font.bold)
+ self.assertEquals(openpyxl.style.Border.BORDER_THIN,
+ xlsx_style.borders.top.border_style)
+ self.assertEquals(openpyxl.style.Border.BORDER_THIN,
+ xlsx_style.borders.right.border_style)
+ self.assertEquals(openpyxl.style.Border.BORDER_THIN,
+ xlsx_style.borders.bottom.border_style)
+ self.assertEquals(openpyxl.style.Border.BORDER_THIN,
+ xlsx_style.borders.left.border_style)
+ self.assertEquals(openpyxl.style.Alignment.HORIZONTAL_CENTER,
+ xlsx_style.alignment.horizontal)
+
+ def test_to_excel_header_styling(self):
+
+ import StringIO
+ s = StringIO.StringIO(
+ """Date,ticker,type,value
+ 2001-01-01,x,close,12.2
+ 2001-01-01,x,open ,12.1
+ 2001-01-01,y,close,12.2
+ 2001-01-01,y,open ,12.1
+ 2001-02-01,x,close,12.2
+ 2001-02-01,x,open ,12.1
+ 2001-02-01,y,close,12.2
+ 2001-02-01,y,open ,12.1
+ 2001-03-01,x,close,12.2
+ 2001-03-01,x,open ,12.1
+ 2001-03-01,y,close,12.2
+ 2001-03-01,y,open ,12.1""")
+ df = read_csv(s, parse_dates=["Date"])
+ pdf = df.pivot_table(values="value", rows=["ticker"],
+ cols=["Date", "type"])
+
+ try:
+ import xlrd
+ import openpyxl
+ from openpyxl.cell import get_column_letter
+ except ImportError:
+ raise nose.SkipTest
+
+ filename = '__tmp__.xls'
+ pdf.to_excel(filename, 'test1')
+
+
+ wbk = xlrd.open_workbook(filename,
+ formatting_info=True)
+ self.assertEquals(["test1"], wbk.sheet_names())
+ ws = wbk.sheet_by_name('test1')
+ self.assertEquals([(0, 1, 5, 7), (0, 1, 3, 5), (0, 1, 1, 3)],
+ ws.merged_cells)
+ for i in range(0, 2):
+ for j in range(0, 7):
+ xfx = ws.cell_xf_index(0, 0)
+ cell_xf = wbk.xf_list[xfx]
+ font = wbk.font_list
+ self.assertEquals(1, font[cell_xf.font_index].bold)
+ self.assertEquals(1, cell_xf.border.top_line_style)
+ self.assertEquals(1, cell_xf.border.right_line_style)
+ self.assertEquals(1, cell_xf.border.bottom_line_style)
+ self.assertEquals(1, cell_xf.border.left_line_style)
+ self.assertEquals(2, cell_xf.alignment.hor_align)
+
+ os.remove(filename)
+ # test xlsx_styling
+ filename = '__tmp__.xlsx'
+ pdf.to_excel(filename, 'test1')
+
+ wbk = openpyxl.load_workbook(filename)
+ self.assertEquals(["test1"], wbk.get_sheet_names())
+ ws = wbk.get_sheet_by_name('test1')
+
+ xlsaddrs = ["%s2" % chr(i) for i in range(ord('A'), ord('H'))]
+ xlsaddrs += ["A%s" % i for i in range(1, 6)]
+ xlsaddrs += ["B1", "D1", "F1"]
+ for xlsaddr in xlsaddrs:
+ cell = ws.cell(xlsaddr)
+ self.assertTrue(cell.style.font.bold)
+ self.assertEquals(openpyxl.style.Border.BORDER_THIN,
+ cell.style.borders.top.border_style)
+ self.assertEquals(openpyxl.style.Border.BORDER_THIN,
+ cell.style.borders.right.border_style)
+ self.assertEquals(openpyxl.style.Border.BORDER_THIN,
+ cell.style.borders.bottom.border_style)
+ self.assertEquals(openpyxl.style.Border.BORDER_THIN,
+ cell.style.borders.left.border_style)
+ self.assertEquals(openpyxl.style.Alignment.HORIZONTAL_CENTER,
+ cell.style.alignment.horizontal)
+
+ mergedcells_addrs = ["C1", "E1", "G1"]
+ for maddr in mergedcells_addrs:
+ self.assertTrue(ws.cell(maddr).merged)
+
+ os.remove(filename)
+
+
+
def test_info(self):
io = StringIO()
self.frame.info(buf=io)
| adds to export dataframe for excel:
- multiindex (merge cells similar to htmlformatter)
- border
- bold header
- ability to add dataframe in same sheet (startrow, startcol)
http://cl.ly/image/2r102L0E1l23
solves Issue #2294
| https://api.github.com/repos/pandas-dev/pandas/pulls/2370 | 2012-11-27T20:23:26Z | 2012-11-29T19:32:13Z | 2012-11-29T19:32:13Z | 2014-06-16T03:26:35Z |
ENH: partial date slicing for day, hour, and minute resolutions #2306 | diff --git a/pandas/src/datetime.pyx b/pandas/src/datetime.pyx
index 44660cd3bb682..bb5ed79cc4b5d 100644
--- a/pandas/src/datetime.pyx
+++ b/pandas/src/datetime.pyx
@@ -1603,3 +1603,100 @@ cpdef normalize_date(object dt):
return datetime(dt.year, dt.month, dt.day)
else:
raise TypeError('Unrecognized type: %s' % type(dt))
+
+cpdef resolution(ndarray[int64_t] stamps, tz=None):
+ cdef:
+ Py_ssize_t i, n = len(stamps)
+ pandas_datetimestruct dts
+ int reso = D_RESO, curr_reso
+
+ if tz is not None:
+ if isinstance(tz, basestring):
+ tz = pytz.timezone(tz)
+ return _reso_local(stamps, tz)
+ else:
+ for i in range(n):
+ if stamps[i] == NPY_NAT:
+ continue
+ pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
+ curr_reso = _reso_stamp(&dts)
+ if curr_reso < reso:
+ reso = curr_reso
+ return reso
+
+US_RESO = 0
+S_RESO = 1
+T_RESO = 2
+H_RESO = 3
+D_RESO = 4
+
+cdef inline int _reso_stamp(pandas_datetimestruct *dts):
+ if dts.us != 0:
+ return US_RESO
+ elif dts.sec != 0:
+ return S_RESO
+ elif dts.min != 0:
+ return T_RESO
+ elif dts.hour != 0:
+ return H_RESO
+ return D_RESO
+
+cdef _reso_local(ndarray[int64_t] stamps, object tz):
+ cdef:
+ Py_ssize_t n = len(stamps)
+ int reso = D_RESO, curr_reso
+ ndarray[int64_t] trans, deltas, pos
+ pandas_datetimestruct dts
+
+ if _is_utc(tz):
+ for i in range(n):
+ if stamps[i] == NPY_NAT:
+ continue
+ pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
+ curr_reso = _reso_stamp(&dts)
+ if curr_reso < reso:
+ reso = curr_reso
+ elif _is_tzlocal(tz):
+ for i in range(n):
+ if stamps[i] == NPY_NAT:
+ continue
+ pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns,
+ &dts)
+ dt = datetime(dts.year, dts.month, dts.day, dts.hour,
+ dts.min, dts.sec, dts.us, tz)
+ delta = int(total_seconds(_get_utcoffset(tz, dt))) * 1000000000
+ pandas_datetime_to_datetimestruct(stamps[i] + delta,
+ PANDAS_FR_ns, &dts)
+ curr_reso = _reso_stamp(&dts)
+ if curr_reso < reso:
+ reso = curr_reso
+ else:
+ # Adjust datetime64 timestamp, recompute datetimestruct
+ trans = _get_transitions(tz)
+ deltas = _get_deltas(tz)
+ _pos = trans.searchsorted(stamps, side='right') - 1
+ if _pos.dtype != np.int64:
+ _pos = _pos.astype(np.int64)
+ pos = _pos
+
+ # statictzinfo
+ if not hasattr(tz, '_transition_info'):
+ for i in range(n):
+ if stamps[i] == NPY_NAT:
+ continue
+ pandas_datetime_to_datetimestruct(stamps[i] + deltas[0],
+ PANDAS_FR_ns, &dts)
+ curr_reso = _reso_stamp(&dts)
+ if curr_reso < reso:
+ reso = curr_reso
+ else:
+ for i in range(n):
+ if stamps[i] == NPY_NAT:
+ continue
+ pandas_datetime_to_datetimestruct(stamps[i] + deltas[pos[i]],
+ PANDAS_FR_ns, &dts)
+ curr_reso = _reso_stamp(&dts)
+ if curr_reso < reso:
+ reso = curr_reso
+
+ return reso
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index bc1770d58b0bc..a169992485ff6 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -22,6 +22,24 @@ class FreqGroup(object):
FR_MIN = 8000
FR_SEC = 9000
+class Resolution(object):
+
+ RESO_US = 0
+ RESO_SEC = 1
+ RESO_MIN = 2
+ RESO_HR = 3
+ RESO_DAY = 4
+
+ @classmethod
+ def get_str(cls, reso):
+ return {RESO_US : 'microsecond',
+ RESO_SEC : 'second',
+ RESO_MIN : 'minute',
+ RESO_HR : 'hour',
+ RESO_DAY : 'day'}.get(reso, 'day')
+
+def get_reso_string(reso):
+ return Resolution.get_str(reso)
def get_to_timestamp_base(base):
if base <= FreqGroup.FR_WK:
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 6cbfbfa459308..c6eec268ce52b 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -8,7 +8,8 @@
from pandas.core.common import isnull
from pandas.core.index import Index, Int64Index
-from pandas.tseries.frequencies import infer_freq, to_offset, get_period_alias
+from pandas.tseries.frequencies import (infer_freq, to_offset, get_period_alias,
+ Resolution, get_reso_string)
from pandas.tseries.offsets import DateOffset, generate_range, Tick
from pandas.tseries.tools import parse_time_string, normalize_date
from pandas.util.decorators import cache_readonly
@@ -1006,6 +1007,23 @@ def _partial_date_slice(self, reso, parsed):
d = lib.monthrange(parsed.year, qe)[1] # at end of month
t1 = Timestamp(datetime(parsed.year, parsed.month, 1))
t2 = Timestamp(datetime(parsed.year, qe, d))
+ elif reso == 'day' and self._resolution < Resolution.RESO_DAY:
+ st = datetime(parsed.year, parsed.month, parsed.day)
+ t1 = Timestamp(st)
+ t2 = st + offsets.Day()
+ t2 = Timestamp(Timestamp(t2).value - 1)
+ elif (reso == 'hour' and
+ self._resolution < Resolution.RESO_HR):
+ st = datetime(parsed.year, parsed.month, parsed.day,
+ hour=parsed.hour)
+ t1 = Timestamp(st)
+ t2 = Timestamp(Timestamp(st + offsets.Hour()).value - 1)
+ elif (reso == 'minute' and
+ self._resolution < Resolution.RESO_MIN):
+ st = datetime(parsed.year, parsed.month, parsed.day,
+ hour=parsed.hour, minute=parsed.minute)
+ t1 = Timestamp(st)
+ t2 = Timestamp(Timestamp(st + offsets.Minute()).value - 1)
else:
raise KeyError
@@ -1221,6 +1239,18 @@ def is_normalized(self):
"""
return lib.dates_normalized(self.asi8, self.tz)
+ @cache_readonly
+ def resolution(self):
+ """
+ Returns day, hour, minute, second, or microsecond
+ """
+ reso = self._resolution
+ return get_reso_string(reso)
+
+ @cache_readonly
+ def _resolution(self):
+ return lib.resolution(self.asi8, self.tz)
+
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index 85b3654bac70a..b1fa5d53895a0 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -1219,4 +1219,3 @@ def period_range(start=None, end=None, periods=None, freq='D', name=None):
"""
return PeriodIndex(start=start, end=end, periods=periods,
freq=freq, name=name)
-
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index ef35c44b53772..cce5093e2f46c 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -1833,6 +1833,48 @@ def test_partial_slice(self):
expected = s[:'20060228']
assert_series_equal(result, expected)
+ result = s['2005-1-1']
+ self.assert_(result == s.irow(0))
+
+ self.assertRaises(Exception, s.__getitem__, '2004-12-31')
+
+ def test_partial_slice_daily(self):
+ rng = DatetimeIndex(freq='H', start=datetime(2005,1,31), periods=500)
+ s = Series(np.arange(len(rng)), index=rng)
+
+ result = s['2005-1-31']
+ assert_series_equal(result, s.ix[:24])
+
+ self.assertRaises(Exception, s.__getitem__, '2004-12-31 00')
+
+ def test_partial_slice_hourly(self):
+ rng = DatetimeIndex(freq='T', start=datetime(2005,1,1,20,0,0),
+ periods=500)
+ s = Series(np.arange(len(rng)), index=rng)
+
+ result = s['2005-1-1']
+ assert_series_equal(result, s.ix[:60*4])
+
+ result = s['2005-1-1 20']
+ assert_series_equal(result, s.ix[:60])
+
+ self.assert_(s['2005-1-1 20:00'] == s.ix[0])
+ self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:15')
+
+ def test_partial_slice_minutely(self):
+ rng = DatetimeIndex(freq='S', start=datetime(2005,1,1,23,59,0),
+ periods=500)
+ s = Series(np.arange(len(rng)), index=rng)
+
+ result = s['2005-1-1 23:59']
+ assert_series_equal(result, s.ix[:60])
+
+ result = s['2005-1-1']
+ assert_series_equal(result, s.ix[:60])
+
+ self.assert_(s['2005-1-1 23:59:00'] == s.ix[0])
+ self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:00:00')
+
def test_partial_not_monotonic(self):
rng = date_range(datetime(2005,1,1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index befe3444d98bd..7c7ec3845aa54 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -9,7 +9,7 @@
try:
import dateutil
- from dateutil.parser import parse
+ from dateutil.parser import parse, DEFAULTPARSER
from dateutil.relativedelta import relativedelta
# raise exception if dateutil 2.0 install on 2.x platform
@@ -131,6 +131,7 @@ class DateParseError(ValueError):
qpat1 = re.compile(r'(\d)Q(\d\d)')
qpat2 = re.compile(r'(\d\d)Q(\d)')
ypat = re.compile(r'(\d\d\d\d)$')
+has_time = re.compile('(.+)([\s]|T)+(.+)')
def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
@@ -226,25 +227,61 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
yearfirst = print_config.date_yearfirst
try:
- parsed = parse(arg, dayfirst=dayfirst, yearfirst=yearfirst)
+ parsed, reso = dateutil_parse(arg, default, dayfirst=dayfirst,
+ yearfirst=yearfirst)
except Exception, e:
raise DateParseError(e)
if parsed is None:
raise DateParseError("Could not parse %s" % arg)
- repl = {}
- reso = 'year'
+ return parsed, parsed, reso # datetime, resolution
+
+def dateutil_parse(timestr, default,
+ ignoretz=False, tzinfos=None,
+ **kwargs):
+ """ lifted from dateutil to get resolution"""
+ res = DEFAULTPARSER._parse(timestr, **kwargs)
+
+ if res is None:
+ raise ValueError, "unknown string format"
+ repl = {}
for attr in ["year", "month", "day", "hour",
"minute", "second", "microsecond"]:
- value = getattr(parsed, attr)
- if value is not None and value != 0: # or attr in can_be_zero):
+ value = getattr(res, attr)
+ if value is not None:
repl[attr] = value
reso = attr
- ret = default.replace(**repl)
- return ret, parsed, reso # datetime, resolution
+ if reso == 'microsecond' and repl['microsecond'] == 0:
+ reso = 'second'
+ ret = default.replace(**repl)
+ if res.weekday is not None and not res.day:
+ ret = ret+relativedelta.relativedelta(weekday=res.weekday)
+ if not ignoretz:
+ if callable(tzinfos) or tzinfos and res.tzname in tzinfos:
+ if callable(tzinfos):
+ tzdata = tzinfos(res.tzname, res.tzoffset)
+ else:
+ tzdata = tzinfos.get(res.tzname)
+ if isinstance(tzdata, datetime.tzinfo):
+ tzinfo = tzdata
+ elif isinstance(tzdata, basestring):
+ tzinfo = tz.tzstr(tzdata)
+ elif isinstance(tzdata, int):
+ tzinfo = tz.tzoffset(res.tzname, tzdata)
+ else:
+ raise ValueError, "offset must be tzinfo subclass, " \
+ "tz string, or int offset"
+ ret = ret.replace(tzinfo=tzinfo)
+ elif res.tzname and res.tzname in time.tzname:
+ ret = ret.replace(tzinfo=tz.tzlocal())
+ elif res.tzoffset == 0:
+ ret = ret.replace(tzinfo=tz.tzutc())
+ elif res.tzoffset:
+ ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
+ return ret, reso
def _attempt_monthly(val):
pats = ['%Y-%m', '%m-%Y', '%b %Y', '%b-%Y']
| https://api.github.com/repos/pandas-dev/pandas/pulls/2369 | 2012-11-27T19:03:14Z | 2012-12-02T17:10:49Z | 2012-12-02T17:10:49Z | 2012-12-02T17:10:49Z | |
BLD: temporary workaround for travis numpy/py3 woes | diff --git a/.travis.yml b/.travis.yml
index e90a83257e210..3cfb4af167038 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -7,7 +7,17 @@ python:
- 3.2
install:
- - pip install --use-mirrors cython numpy nose pytz python-dateutil
+ - export PYTHONIOENCODING=utf8 # activate venv 1.8.4 "detach" fix
+ - virtualenv --version
+ - whoami
+ - pwd
+ # install 1.7.0b2 for 3.3, and pull a version of numpy git master
+ # with a alternate fix for detach bug as a temporary workaround
+ # for the others.
+ - "if [ $TRAVIS_PYTHON_VERSION == '3.3' ]; then pip uninstall numpy; pip install http://downloads.sourceforge.net/project/numpy/NumPy/1.7.0b2/numpy-1.7.0b2.tar.gz; fi"
+ - "if [ $TRAVIS_PYTHON_VERSION == '3.2' ] || [ $TRAVIS_PYTHON_VERSION == '3.1' ]; then pip install --use-mirrors git+git://github.com/numpy/numpy.git@089bfa5865cd39e2b40099755e8563d8f0d04f5f#egg=numpy; fi"
+ - "if [ ${TRAVIS_PYTHON_VERSION:0:1} == '2' ]; then pip install numpy; fi" # should be nop if pre-installed
+ - pip install --use-mirrors cython nose pytz python-dateutil
script:
- python setup.py build_ext install
| note that py3 is now tested against numpy recent git, not pypi, just until
the travis people sort things out.
| https://api.github.com/repos/pandas-dev/pandas/pulls/2365 | 2012-11-27T06:36:31Z | 2012-11-27T06:51:37Z | 2012-11-27T06:51:37Z | 2013-03-26T12:52:12Z |
BUG: del df[k] with non-unique key | diff --git a/pandas/core/common.py b/pandas/core/common.py
index aa7ed9cd6b76f..c6e58e478ec53 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -699,6 +699,23 @@ def iterpairs(seq):
return itertools.izip(seq_it, seq_it_next)
+def split_ranges(mask):
+ """ Generates tuples of ranges which cover all True value in mask
+
+ >>> list(split_ranges([1,0,0,1,0]))
+ [(0, 1), (3, 4)]
+ """
+ ranges = [(0,len(mask))]
+
+ for pos,val in enumerate(mask):
+ if not val: # this pos should be ommited, split off the prefix range
+ r = ranges.pop()
+ if pos > r[0]: # yield non-zero range
+ yield (r[0],pos)
+ if pos+1 < len(mask): # save the rest for processing
+ ranges.append((pos+1,len(mask)))
+ if ranges:
+ yield ranges[-1]
def indent(string, spaces=4):
dent = ' ' * spaces
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 7dd8e4100ef10..035d2531f382f 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -673,7 +673,7 @@ def get_loc(self, key):
Returns
-------
- loc : int
+ loc : int if unique index, possibly slice or mask if not
"""
return self._engine.get_loc(key)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index d54154d0e033e..a2329450a5648 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -181,38 +181,26 @@ def delete(self, item):
def split_block_at(self, item):
"""
- Split block around given column, for "deleting" a column without
- having to copy data by returning views on the original array
+ Split block into zero or more blocks around columns with given label,
+ for "deleting" a column without having to copy data by returning views
+ on the original array.
Returns
-------
- leftb, rightb : (Block or None, Block or None)
+ generator of Block
"""
loc = self.items.get_loc(item)
- if len(self.items) == 1:
- # no blocks left
- return None, None
-
- if loc == 0:
- # at front
- left_block = None
- right_block = make_block(self.values[1:], self.items[1:].copy(),
- self.ref_items)
- elif loc == len(self.values) - 1:
- # at back
- left_block = make_block(self.values[:-1], self.items[:-1].copy(),
- self.ref_items)
- right_block = None
- else:
- # in the middle
- left_block = make_block(self.values[:loc],
- self.items[:loc].copy(), self.ref_items)
- right_block = make_block(self.values[loc + 1:],
- self.items[loc + 1:].copy(),
- self.ref_items)
+ if type(loc) == slice or type(loc) == int:
+ mask = [True]*len(self)
+ mask[loc] = False
+ else: # already a mask, inverted
+ mask = -loc
- return left_block, right_block
+ for s,e in com.split_ranges(mask):
+ yield make_block(self.values[s:e],
+ self.items[s:e].copy(),
+ self.ref_items)
def fillna(self, value, inplace=False):
new_values = self.values if inplace else self.values.copy()
@@ -906,9 +894,12 @@ def delete(self, item):
i, _ = self._find_block(item)
loc = self.items.get_loc(item)
+ self._delete_from_block(i, item)
+ if com._is_bool_indexer(loc): # dupe keys may return mask
+ loc = [i for i,v in enumerate(loc) if v]
+
new_items = self.items.delete(loc)
- self._delete_from_block(i, item)
self.set_items_norename(new_items)
def set(self, item, value):
@@ -970,13 +961,8 @@ def _delete_from_block(self, i, item):
Delete and maybe remove the whole block
"""
block = self.blocks.pop(i)
- new_left, new_right = block.split_block_at(item)
-
- if new_left is not None:
- self.blocks.append(new_left)
-
- if new_right is not None:
- self.blocks.append(new_right)
+ for b in block.split_block_at(item):
+ self.blocks.append(b)
def _add_new_block(self, item, value, loc=None):
# Do we care about dtype at the moment?
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index 661c3a2a3edd8..dd93666cba0af 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -117,6 +117,35 @@ def test_iterpairs():
assert(result == expected)
+def test_split_ranges():
+ def _bin(x, width):
+ "return int(x) as a base2 string of given width"
+ return ''.join(str((x>>i)&1) for i in xrange(width-1,-1,-1))
+
+ def test_locs(mask):
+ nfalse = sum(np.array(mask) == 0)
+
+ remaining=0
+ for s, e in com.split_ranges(mask):
+ remaining += e-s
+
+ assert 0 not in mask[s:e]
+
+ # make sure the total items covered by the ranges are a complete cover
+ assert remaining + nfalse == len(mask)
+
+ # exhaustively test all possible mask sequences of length 8
+ ncols=8
+ for i in range(2**ncols):
+ cols=map(int,list(_bin(i,ncols))) # count up in base2
+ mask=[cols[i] == 1 for i in range(len(cols))]
+ test_locs(mask)
+
+ # base cases
+ test_locs([])
+ test_locs([0])
+ test_locs([1])
+
def test_indent():
s = 'a b c\nd e f'
result = com.indent(s, spaces=6)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 955cfedd70466..5e77bfa6c5d8c 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -2978,6 +2978,18 @@ def test_pop(self):
foo = self.frame.pop('foo')
self.assert_('foo' not in self.frame)
+ def test_pop_non_unique_cols(self):
+ df=DataFrame({0:[0,1],1:[0,1],2:[4,5]})
+ df.columns=["a","b","a"]
+
+ res=df.pop("a")
+ self.assertEqual(type(res),DataFrame)
+ self.assertEqual(len(res),2)
+ self.assertEqual(len(df.columns),1)
+ self.assertTrue("b" in df.columns)
+ self.assertFalse("a" in df.columns)
+ self.assertEqual(len(df.index),2)
+
def test_iter(self):
self.assert_(tm.equalContents(list(self.frame), self.frame.columns))
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index 0610dc92e2379..31ffcc5832758 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -155,22 +155,22 @@ def test_delete(self):
self.assertRaises(Exception, self.fblock.delete, 'b')
def test_split_block_at(self):
- left, right = self.fblock.split_block_at('a')
- self.assert_(left is None)
- self.assert_(np.array_equal(right.items, ['c', 'e']))
+ bs = list(self.fblock.split_block_at('a'))
+ self.assertEqual(len(bs),1)
+ self.assertTrue(np.array_equal(bs[0].items, ['c', 'e']))
- left, right = self.fblock.split_block_at('c')
- self.assert_(np.array_equal(left.items, ['a']))
- self.assert_(np.array_equal(right.items, ['e']))
+ bs = list(self.fblock.split_block_at('c'))
+ self.assertEqual(len(bs),2)
+ self.assertTrue(np.array_equal(bs[0].items, ['a']))
+ self.assertTrue(np.array_equal(bs[1].items, ['e']))
- left, right = self.fblock.split_block_at('e')
- self.assert_(np.array_equal(left.items, ['a', 'c']))
- self.assert_(right is None)
+ bs = list(self.fblock.split_block_at('e'))
+ self.assertEqual(len(bs),1)
+ self.assertTrue(np.array_equal(bs[0].items, ['a', 'c']))
bblock = get_bool_ex(['f'])
- left, right = bblock.split_block_at('f')
- self.assert_(left is None)
- self.assert_(right is None)
+ bs = list(bblock.split_block_at('f'))
+ self.assertEqual(len(bs),0)
def test_unicode_repr(self):
mat = np.empty((N, 2), dtype=object)
| This touches some delicate functions to be messing with, so I've split off the
underlying logic into a function in common and added an exhaustive test.
Would welcome review none the less.
Also, I tried to test for nagative perf. There must something
wrong with the test since comparing the commit after just adding a test to the baseline,
shows 50% degredation in a bunch of things. As I said, I don't trust this, ideas?
```
timeseries_asof_nan 23.4131 15.7471 1.4868
datetimeindex_normalize 1153.1918 774.8010 1.4884
timeseries_asof 22.3342 14.9964 1.4893
reshape_unstack_simple 4.8106 3.2289 1.4899
read_table_multiple_date_baseline 980.0920 657.3641 1.4909
timeseries_1min_5min_mean 0.8087 0.5422 1.4915
timeseries_timestamp_tzinfo_cons 0.0210 0.0141 1.4922
match_strings 0.5718 0.3817 1.4982
timeseries_large_lookup_value 0.0301 0.0201 1.4998
reindex_fillna_pad 0.1828 0.1217 1.5018
timeseries_to_datetime_iso8601 5.6048 3.7270 1.5038
read_table_multiple_date 2169.5440 1441.5460 1.5050
reindex_daterange_pad 0.2608 0.1731 1.5066
timeseries_1min_5min_ohlc 0.8046 0.5331 1.5092
reindex_daterange_backfill 0.2466 0.1634 1.5094
period_setitem 1166.9910 772.4509 1.5108
reindex_fillna_backfill 0.1790 0.1183 1.5130
timeseries_asof_single 0.0656 0.0434 1.5136
append_frame_single_mixed 2.0747 1.3697 1.5147
timeseries_slice_minutely 0.0762 0.0501 1.5208
Columns: test_name | target_duration [ms] | baseline_duration [ms] | ratio
(01bc3e0 against 81169f9)
- a Ratio of 1.30 means the target commit is 30% slower then the baseline.
```
closes #2347
| https://api.github.com/repos/pandas-dev/pandas/pulls/2349 | 2012-11-24T21:51:27Z | 2012-11-25T19:17:47Z | 2012-11-25T19:17:47Z | 2014-06-12T16:15:38Z |
Fixes for #1000, to_string(), to_html() should respect col_space | diff --git a/doc/source/io.rst b/doc/source/io.rst
index f74120ad7ef57..3fbc45dda8fa4 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -668,7 +668,7 @@ over the string representation of the object. All arguments are optional:
- ``buf`` default None, for example a StringIO object
- ``columns`` default None, which columns to write
- - ``col_space`` default None, number of spaces to write between columns
+ - ``col_space`` default None, minimum width of each column.
- ``na_rep`` default ``NaN``, representation of NA value
- ``formatters`` default None, a dictionary (by column) of functions each of
which takes a single argument and returns a formatted string
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 13e504a8e1f88..841500329d4a9 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -27,7 +27,7 @@
columns : sequence, optional
the subset of columns to write; default None writes all columns
col_space : int, optional
- the width of each columns
+ the minimum width of each column
header : bool, optional
whether to print column labels, default True
index : bool, optional
@@ -215,7 +215,7 @@ def _to_str_columns(self, force_unicode=False):
fmt_values = self._format_col(i)
cheader = str_columns[i]
- max_colwidth = max(_strlen(x) for x in cheader)
+ max_colwidth = max(self.col_space or 0, *(_strlen(x) for x in cheader))
fmt_values = _make_fixed_width(fmt_values, self.justify,
minimum=max_colwidth)
@@ -434,6 +434,11 @@ def write(self, s, indent=0):
self.elements.append(' ' * indent + com.pprint_thing(s))
def write_th(self, s, indent=0, tags=None):
+ if (self.fmt.col_space is not None
+ and self.fmt.col_space > 0 ):
+ tags = (tags or "" )
+ tags += 'style="min-width: %s;"' % self.fmt.col_space
+
return self._write_cell(s, kind='th', indent=indent, tags=tags)
def write_td(self, s, indent=0, tags=None):
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 10bb75bfbb5b6..8d0dacf2e7edd 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -182,6 +182,30 @@ def test_to_string_buffer_all_unicode(self):
# this should work
buf.getvalue()
+ def test_to_string_with_col_space(self):
+ df = DataFrame(np.random.random(size=(1,3)))
+ c10=len(df.to_string(col_space=10).split("\n")[1])
+ c20=len(df.to_string(col_space=20).split("\n")[1])
+ c30=len(df.to_string(col_space=30).split("\n")[1])
+ self.assertTrue( c10 < c20 < c30 )
+
+ def test_to_html_with_col_space(self):
+ def check_with_width(df,col_space):
+ import re
+ # check that col_space affects HTML generation
+ # and be very brittle about it.
+ html = df.to_html(col_space=col_space)
+ hdrs = [x for x in html.split("\n") if re.search("<th[>\s]",x)]
+ self.assertTrue(len(hdrs) > 0 )
+ for h in hdrs:
+ self.assertTrue("min-width" in h )
+ self.assertTrue(str(col_space) in h )
+
+ df = DataFrame(np.random.random(size=(1,3)))
+
+ check_with_width(df,30)
+ check_with_width(df,50)
+
def test_to_html_unicode(self):
# it works!
df = DataFrame({u'\u03c3' : np.arange(10.)})
| Note that the semantics of `col_space` are different in each case, characters vs. pixels,
but that's reasonable.
I hope this doesn't presage more html configuration via kwd arguments, yonder way
madness lies.
also, there's a discrepency between the docstring and [io.rst](https://github.com/pydata/pandas/blame/master/doc/source/io.rst#L671) (github doesn't seem to jump to the right line)
the first defines it as the width of the columns, the latter as the number of spaces between columns,
I adopted the former, since space between columns does not translate as easily to html.
_sigh_, also, `colSpace` has already been deprecated in favor of `col_space`, so
I would feel bad about deprecating `col_space` in favor of `min_col_width`.
I'll leave that decision to braver, fearless souls.
closes #1000.
| https://api.github.com/repos/pandas-dev/pandas/pulls/2328 | 2012-11-22T17:26:09Z | 2012-11-29T20:32:03Z | 2012-11-29T20:32:02Z | 2014-06-25T18:33:51Z |
CLN: Dropped python 2.5 support | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 30f6b66a2b7d3..71d767a57bccb 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -8,13 +8,6 @@
import itertools
-try:
- next
-except NameError: # pragma: no cover
- # Python < 2.6
- def next(x):
- return x.next()
-
from numpy.lib.format import read_array, write_array
import numpy as np
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f7f296e822e15..44b31926f4e15 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1,5 +1,3 @@
-from __future__ import with_statement
-
"""
DataFrame
---------
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 52c8c4aa65a13..a5fc7ebeed101 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -7,13 +7,6 @@
from urlparse import urlparse
import csv
-try:
- next
-except NameError: # pragma: no cover
- # Python < 2.6
- def next(x):
- return x.next()
-
import numpy as np
from pandas.core.index import Index, MultiIndex
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 9442f274a7810..afd05610e3427 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -1,5 +1,3 @@
-from __future__ import with_statement
-
import nose
import unittest
import os
@@ -723,4 +721,3 @@ def _test_sort(obj):
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
-
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index 44c40b6930784..661c3a2a3edd8 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -1,4 +1,3 @@
-from __future__ import with_statement
from datetime import datetime
import sys
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 708f8143de3d5..05f9e51150850 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -1,5 +1,3 @@
-from __future__ import with_statement
-
import nose
import os
import string
@@ -460,9 +458,9 @@ def test_parallel_coordinates(self):
path = os.path.join(curpath(), 'data/iris.csv')
df = read_csv(path)
_check_plot_works(parallel_coordinates, df, 'Name')
- _check_plot_works(parallel_coordinates, df, 'Name',
+ _check_plot_works(parallel_coordinates, df, 'Name',
colors=('#556270', '#4ECDC4', '#C7F464'))
- _check_plot_works(parallel_coordinates, df, 'Name',
+ _check_plot_works(parallel_coordinates, df, 'Name',
colors=['dodgerblue', 'aquamarine', 'seagreen'])
@slow
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 86feb68052f67..6cec53eff382f 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -1,5 +1,4 @@
# pylint: disable-msg=E1101,W0612
-from __future__ import with_statement # for Python 2.5
import pandas.util.compat as itertools
from datetime import datetime, time, timedelta
import sys
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py
index 688e4945b6eb3..5da018b54ad4a 100644
--- a/pandas/tseries/tests/test_timezones.py
+++ b/pandas/tseries/tests/test_timezones.py
@@ -1,5 +1,4 @@
# pylint: disable-msg=E1101,W0612
-from __future__ import with_statement # for Python 2.5
from datetime import datetime, time, timedelta, tzinfo
import sys
import os
diff --git a/tox.ini b/tox.ini
index 7d09b3aa887e1..9f8e1af8ae924 100644
--- a/tox.ini
+++ b/tox.ini
@@ -4,7 +4,7 @@
# and then run "tox" from this directory.
[tox]
-envlist = py25, py26, py27, py31, py32
+envlist = py26, py27, py31, py32
[testenv]
deps =
@@ -35,15 +35,6 @@ commands =
# tox should provide a preinstall-commands hook.
pip uninstall pandas -qy
-
-[testenv:py25]
-deps =
- cython
- numpy >= 1.6.1
- nose
- pytz
- simplejson
-
[testenv:py26]
[testenv:py27]
diff --git a/tox_prll.ini b/tox_prll.ini
index 85856db064ca3..70edffac717a2 100644
--- a/tox_prll.ini
+++ b/tox_prll.ini
@@ -4,7 +4,7 @@
# and then run "tox" from this directory.
[tox]
-envlist = py25, py26, py27, py31, py32
+envlist = py26, py27, py31, py32
sdistsrc = {env:DISTFILE}
[testenv]
@@ -36,15 +36,6 @@ commands =
# tox should provide a preinstall-commands hook.
pip uninstall pandas -qy
-
-[testenv:py25]
-deps =
- cython
- numpy >= 1.6.1
- nose
- pytz
- simplejson
-
[testenv:py26]
[testenv:py27]
| https://api.github.com/repos/pandas-dev/pandas/pulls/2323 | 2012-11-22T00:47:48Z | 2012-11-23T04:52:17Z | 2012-11-23T04:52:17Z | 2014-06-19T10:32:41Z | |
BUG: dtype=object should stop conversion from object in frame constructo... | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 14b435e0aafc8..04f72de80d500 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -402,8 +402,7 @@ def __init__(self, data=None, index=None, columns=None, dtype=None,
index = _get_names_from_index(data)
if isinstance(data[0], (list, tuple, dict, Series)):
- arrays, columns = _to_arrays(data, columns)
-
+ arrays, columns = _to_arrays(data, columns, dtype=dtype)
columns = _ensure_index(columns)
if index is None:
@@ -5159,7 +5158,7 @@ def _rec_to_dict(arr):
return columns, sdict
-def _to_arrays(data, columns, coerce_float=False):
+def _to_arrays(data, columns, coerce_float=False, dtype=None):
"""
Return list of arrays, columns
"""
@@ -5167,30 +5166,35 @@ def _to_arrays(data, columns, coerce_float=False):
if len(data) == 0:
return [], columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
- return _list_to_arrays(data, columns, coerce_float=coerce_float)
+ return _list_to_arrays(data, columns, coerce_float=coerce_float,
+ dtype=dtype)
elif isinstance(data[0], dict):
return _list_of_dict_to_arrays(data, columns,
- coerce_float=coerce_float)
+ coerce_float=coerce_float,
+ dtype=dtype)
elif isinstance(data[0], Series):
return _list_of_series_to_arrays(data, columns,
- coerce_float=coerce_float)
+ coerce_float=coerce_float,
+ dtype=dtype)
else:
# last ditch effort
data = map(tuple, data)
- return _list_to_arrays(data, columns, coerce_float=coerce_float)
+ return _list_to_arrays(data, columns,
+ coerce_float=coerce_float,
+ dtype=dtype)
-def _list_to_arrays(data, columns, coerce_float=False):
+def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
if len(data) > 0 and isinstance(data[0], tuple):
content = list(lib.to_object_array_tuples(data).T)
else:
# list of lists
content = list(lib.to_object_array(data).T)
- return _convert_object_array(content, columns,
+ return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
-def _list_of_series_to_arrays(data, columns, coerce_float=False):
+def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
from pandas.core.index import _get_combined_index
if columns is None:
@@ -5211,13 +5215,13 @@ def _list_of_series_to_arrays(data, columns, coerce_float=False):
if values.dtype == np.object_:
content = list(values.T)
- return _convert_object_array(content, columns,
+ return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
else:
return values.T, columns
-def _list_of_dict_to_arrays(data, columns, coerce_float=False):
+def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
if columns is None:
gen = (x.keys() for x in data)
columns = lib.fast_unique_multiple_list_gen(gen)
@@ -5228,11 +5232,11 @@ def _list_of_dict_to_arrays(data, columns, coerce_float=False):
for d in data]
content = list(lib.dicts_to_array(data, list(columns)).T)
- return _convert_object_array(content, columns,
+ return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
-def _convert_object_array(content, columns, coerce_float=False):
+def _convert_object_array(content, columns, coerce_float=False, dtype=None):
if columns is None:
columns = _default_index(len(content))
else:
@@ -5241,6 +5245,7 @@ def _convert_object_array(content, columns, coerce_float=False):
'columns' % (len(columns), len(content)))
arrays = [lib.maybe_convert_objects(arr, try_float=coerce_float)
+ if dtype != object and dtype != np.object else arr
for arr in content]
return arrays, columns
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 042c744ef167a..ce9bfc2af1198 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -1730,6 +1730,12 @@ def test_constructor_dtype_nocast_view(self):
should_be_view[0][0] = 97
self.assertEqual(df.values[0, 0], 97)
+ def test_constructor_dtype_list_data(self):
+ df = DataFrame([[1, '2'],
+ [None, 'a']], dtype=object)
+ self.assert_(df.ix[1, 0] is None)
+ self.assert_(df.ix[0, 1] == '2')
+
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
| ...r #2255
don't call maybe_convert_objects in _convert_object_array if dtype is object.
Note that it also stops other type conversions too.
| https://api.github.com/repos/pandas-dev/pandas/pulls/2291 | 2012-11-19T19:22:53Z | 2012-11-24T00:31:38Z | 2012-11-24T00:31:38Z | 2014-06-25T01:09:13Z |
Support for customizing parallel_plot() x axis tickmarks | diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 0724799ced6f2..b4bfab7a5d8dd 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -460,6 +460,15 @@ def test_parallel_coordinates(self):
path = os.path.join(curpath(), 'data/iris.csv')
df = read_csv(path)
_check_plot_works(parallel_coordinates, df, 'Name')
+ _check_plot_works(parallel_coordinates, df, 'Name',
+ colors=('#556270', '#4ECDC4', '#C7F464'))
+ _check_plot_works(parallel_coordinates, df, 'Name',
+ colors=['dodgerblue', 'aquamarine', 'seagreen'])
+
+ df = read_csv(path, header=None, skiprows=1, names=[1,2,4,8, 'Name'])
+ _check_plot_works(parallel_coordinates, df, 'Name', use_columns=True)
+ _check_plot_works(parallel_coordinates, df, 'Name',
+ xticks=[1, 5, 25, 125])
@slow
def test_radviz(self):
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 2e6faf5eb9362..aec7081c57352 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -411,20 +411,41 @@ def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
return fig
-def parallel_coordinates(data, class_column, cols=None, ax=None, **kwds):
+def parallel_coordinates(data, class_column, cols=None, ax=None, colors=None,
+ use_columns=False, xticks=None, **kwds):
"""Parallel coordinates plotting.
- Parameters:
- -----------
- data: A DataFrame containing data to be plotted
- class_column: Column name containing class names
- cols: A list of column names to use, optional
- ax: matplotlib axis object, optional
- kwds: A list of keywords for matplotlib plot method
+ Parameters
+ ----------
+ data: DataFrame
+ A DataFrame containing data to be plotted
+ class_column: str
+ Column name containing class names
+ cols: list, optional
+ A list of column names to use
+ ax: matplotlib.axis, optional
+ matplotlib axis object
+ colors: list or tuple, optional
+ Colors to use for the different classes
+ use_columns: bool, optional
+ If true, columns will be used as xticks
+ xticks: list or tuple, optional
+ A list of values to use for xticks
+ kwds: list, optional
+ A list of keywords for matplotlib plot method
- Returns:
- --------
+ Returns
+ -------
ax: matplotlib axis object
+
+ Examples
+ --------
+ >>> from pandas import read_csv
+ >>> from pandas.tools.plotting import parallel_coordinates
+ >>> from matplotlib import pyplot as plt
+ >>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
+ >>> parallel_coordinates(df, 'Name', colors=('#556270', '#4ECDC4', '#C7F464'))
+ >>> plt.show()
"""
import matplotlib.pyplot as plt
import random
@@ -444,11 +465,32 @@ def random_color(column):
used_legends = set([])
ncols = len(df.columns)
- x = range(ncols)
+
+ # determine values to use for xticks
+ if use_columns is True:
+ if not np.all(np.isreal(list(df.columns))):
+ raise ValueError('Columns must be numeric to be used as xticks')
+ x = df.columns
+ elif xticks is not None:
+ if not np.all(np.isreal(xticks)):
+ raise ValueError('xticks specified must be numeric')
+ elif len(xticks) != ncols:
+ raise ValueError('Length of xticks must match number of columns')
+ x = xticks
+ else:
+ x = range(ncols)
if ax == None:
ax = plt.gca()
+ # if user has not specified colors to use, choose at random
+ if colors is None:
+ colors = dict((kls, random_color(kls)) for kls in classes)
+ else:
+ if len(colors) != len(classes):
+ raise ValueError('Number of colors must match number of classes')
+ colors = dict((kls, colors[i]) for i, kls in enumerate(classes))
+
for i in range(n):
row = df.irow(i).values
y = row
@@ -456,16 +498,17 @@ def random_color(column):
if com.pprint_thing(kls) not in used_legends:
label = com.pprint_thing(kls)
used_legends.add(label)
- ax.plot(x, y, color=random_color(kls),
+ ax.plot(x, y, color=colors[kls],
label=label, **kwds)
else:
- ax.plot(x, y, color=random_color(kls), **kwds)
+ ax.plot(x, y, color=colors[kls], **kwds)
- for i in range(ncols):
+ for i in x:
ax.axvline(i, linewidth=1, color='black')
ax.set_xticks(x)
ax.set_xticklabels(df.columns)
+ ax.set_xlim(x[0], x[-1])
ax.legend(loc='upper right')
ax.grid()
return ax
| If the columns are already numeric, then you can simply use "use_columns=True" to have x-axis scaled according to those values. Otherwise the "xticks" parameter can be used to manually specify xticks to use.
Feedback or suggestions are welcome!
| https://api.github.com/repos/pandas-dev/pandas/pulls/2287 | 2012-11-19T03:26:26Z | 2012-12-11T20:34:24Z | 2012-12-11T20:34:24Z | 2012-12-11T20:34:24Z |
ENH: google analytics integration using oauth2 | diff --git a/.gitignore b/.gitignore
index d17c869c4c1ba..320f03a0171a2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,3 +23,5 @@ scikits
pandas.egg-info
*\#*\#
.tox
+pandas/io/*.dat
+pandas/io/*.json
\ No newline at end of file
diff --git a/pandas/io/auth.py b/pandas/io/auth.py
new file mode 100644
index 0000000000000..471436cb1b6bf
--- /dev/null
+++ b/pandas/io/auth.py
@@ -0,0 +1,122 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import logging
+
+import httplib2
+
+import apiclient.discovery as gapi
+import gflags
+import oauth2client.file as auth_file
+import oauth2client.client as oauth
+import oauth2client.tools as tools
+OOB_CALLBACK_URN = oauth.OOB_CALLBACK_URN
+
+class AuthenticationConfigError(ValueError):
+ pass
+
+FLOWS = {}
+FLAGS = gflags.FLAGS
+DEFAULT_SECRETS = os.path.join(os.path.dirname(__file__), 'client_secrets.json')
+DEFAULT_SCOPE = 'https://www.googleapis.com/auth/analytics.readonly'
+DEFAULT_TOKEN_FILE = os.path.join(os.path.dirname(__file__), 'analytics.dat')
+MISSING_CLIENT_MSG = """
+WARNING: Please configure OAuth 2.0
+
+You need to populate the client_secrets.json file found at:
+
+ %s
+
+with information from the APIs Console <https://code.google.com/apis/console>.
+
+"""
+DOC_URL = ('https://developers.google.com/api-client-library/python/guide/'
+ 'aaa_client_secrets')
+
+gflags.DEFINE_enum('logging_level', 'ERROR',
+ ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
+ 'Set the level of logging detail.')
+
+# Name of file that will store the access and refresh tokens to access
+# the API without having to login each time. Make sure this file is in
+# a secure place.
+
+def process_flags(flags=[]):
+ """Uses the command-line flags to set the logging level.
+
+ Args:
+ argv: List of command line arguments passed to the python script.
+ """
+
+ # Let the gflags module process the command-line arguments.
+ try:
+ FLAGS(flags)
+ except gflags.FlagsError, e:
+ print '%s\nUsage: %s ARGS\n%s' % (e, str(flags), FLAGS)
+ sys.exit(1)
+
+ # Set the logging according to the command-line flag.
+ logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
+
+def get_flow(secret, scope, redirect):
+ """
+ Retrieve an authentication flow object based on the given
+ configuration in the secret file name, the authentication scope,
+ and a redirect URN
+ """
+ key = (secret, scope, redirect)
+ flow = FLOWS.get(key, None)
+ if flow is None:
+ msg = MISSING_CLIENT_MSG % secret
+ if not os.path.exists(secret):
+ raise AuthenticationConfigError(msg)
+ flow = oauth.flow_from_clientsecrets(secret, scope,
+ redirect_uri=redirect,
+ message=msg)
+ FLOWS[key] = flow
+ return flow
+
+def make_token_store(fpath=None):
+ """create token storage from give file name"""
+ if fpath is None:
+ fpath = DEFAULT_TOKEN_FILE
+ return auth_file.Storage(fpath)
+
+def authenticate(flow, storage=None):
+ """
+ Try to retrieve a valid set of credentials from the token store if possible
+ Otherwise use the given authentication flow to obtain new credentials
+ and return an authenticated http object
+
+ Parameters
+ ----------
+ flow : authentication workflow
+ storage: token storage, default None
+ """
+ http = httplib2.Http()
+
+ # Prepare credentials, and authorize HTTP object with them.
+ credentials = storage.get()
+ if credentials is None or credentials.invalid:
+ credentials = tools.run(flow, storage)
+
+ http = credentials.authorize(http)
+ return http
+
+def init_service(http):
+ """
+ Use the given http object to build the analytics service object
+ """
+ return gapi.build('analytics', 'v3', http=http)
diff --git a/pandas/io/ga.py b/pandas/io/ga.py
new file mode 100644
index 0000000000000..a433a4add7478
--- /dev/null
+++ b/pandas/io/ga.py
@@ -0,0 +1,429 @@
+"""
+1. Goto https://code.google.com/apis/console
+2. Create new project
+3. Goto APIs and register for OAuth2.0 for installed applications
+4. Download JSON secret file and move into same directory as this file
+"""
+from datetime import datetime
+import numpy as np
+from pandas import DataFrame
+import pandas as pd
+import pandas.io.parsers as psr
+import pandas.lib as lib
+from pandas.io.date_converters import generic_parser
+import pandas.io.auth as auth
+from pandas.util.decorators import Appender, Substitution
+
+from apiclient.errors import HttpError
+from oauth2client.client import AccessTokenRefreshError
+
+TYPE_MAP = {u'INTEGER': int, u'FLOAT': float, u'TIME': int}
+
+NO_CALLBACK = auth.OOB_CALLBACK_URN
+DOC_URL = auth.DOC_URL
+
+_QUERY_PARAMS = """metrics : list of str
+ Un-prefixed metric names (e.g., 'visitors' and not 'ga:visitors')
+dimensions : list of str
+ Un-prefixed dimension variable names
+start_date : str/date/datetime
+end_date : str/date/datetime, optional
+ Defaults to today
+segment : list of str, optional
+filters : list of str, optional
+start_index : int, default 1
+max_results : int, default 10000
+ If >10000, must specify chunksize or ValueError will be raised"""
+
+_QUERY_DOC = """
+Construct a google analytics query using given parameters
+Metrics and dimensions do not need the 'ga:' prefix
+
+Parameters
+----------
+profile_id : str
+%s
+""" % _QUERY_PARAMS
+
+_GA_READER_DOC = """Given query parameters, return a DataFrame with all the data
+or an iterator that returns DataFrames containing chunks of the data
+
+Parameters
+----------
+%s
+sort : bool/list, default True
+ Sort output by index or list of columns
+chunksize : int, optional
+ If max_results >10000, specifies the number of rows per iteration
+index_col : str/list of str/dict, optional
+ If unspecified then dimension variables are set as index
+parse_dates : bool/list/dict, default True
+keep_date_col : boolean, default False
+date_parser : optional
+na_values : optional
+converters : optional
+dayfirst : bool, default False
+ Informs date parsing
+account_name : str, optional
+account_id : str, optional
+property_name : str, optional
+property_id : str, optional
+profile_name : str, optional
+profile_id : str, optional
+%%(extras)s
+Returns
+-------
+data : DataFrame or DataFrame yielding iterator
+""" % _QUERY_PARAMS
+
+_AUTH_PARAMS = """secrets : str, optional
+ File path to the secrets file
+scope : str, optional
+ Authentication scope
+token_file_name : str, optional
+ Path to token storage
+redirect : str, optional
+ Local host redirect if unspecified
+"""
+
+@Substitution(extras=_AUTH_PARAMS)
+@Appender(_GA_READER_DOC)
+def read_ga(metrics, dimensions, start_date, **kwargs):
+ lst = ['secrets', 'scope', 'token_file_name', 'redirect']
+ reader_kwds = dict((p, kwargs.pop(p)) for p in lst if p in kwargs)
+ reader = GAnalytics(**reader_kwds)
+ return reader.get_data(metrics=metrics, start_date=start_date,
+ dimensions=dimensions, **kwargs)
+
+class OAuthDataReader(object):
+ """
+ Abstract class for handling OAuth2 authentication using the Google
+ oauth2client library
+ """
+ def __init__(self, scope, token_file_name, redirect):
+ """
+ Parameters
+ ----------
+ scope : str
+ Designates the authentication scope
+ token_file_name : str
+ Location of cache for authenticated tokens
+ redirect : str
+ Redirect URL
+ """
+ self.scope = scope
+ self.token_store = auth.make_token_store(token_file_name)
+ self.redirect_url = redirect
+
+ def authenticate(self, secrets):
+ """
+ Run the authentication process and return an authorized
+ http object
+
+ Parameters
+ ----------
+ secrets : str
+ File name for client secrets
+
+ Notes
+ -----
+ See google documention for format of secrets file
+ %s
+ """ % DOC_URL
+ flow = self._create_flow(secrets)
+ return auth.authenticate(flow, self.token_store)
+
+ def _create_flow(self, secrets):
+ """
+ Create an authentication flow based on the secrets file
+
+ Parameters
+ ----------
+ secrets : str
+ File name for client secrets
+
+ Notes
+ -----
+ See google documentation for format of secrets file
+ %s
+ """ % DOC_URL
+ return auth.get_flow(secrets, self.scope, self.redirect_url)
+
+
+class GDataReader(OAuthDataReader):
+ """
+ Abstract class for reading data from google APIs using OAuth2
+ Subclasses must implement create_query method
+ """
+ def __init__(self, scope=auth.DEFAULT_SCOPE,
+ token_file_name=auth.DEFAULT_TOKEN_FILE,
+ redirect=NO_CALLBACK, secrets=auth.DEFAULT_SECRETS):
+ super(GDataReader, self).__init__(scope, token_file_name, redirect)
+ self._service = self._init_service(secrets)
+
+ @property
+ def service(self):
+ """The authenticated request service object"""
+ return self._service
+
+ def _init_service(self, secrets):
+ """
+ Build an authenticated google api request service using the given
+ secrets file
+ """
+ http = self.authenticate(secrets)
+ return auth.init_service(http)
+
+ def get_account(self, name=None, id=None, **kwargs):
+ """
+ Retrieve an account that matches the name, id, or some account attribute
+ specified in **kwargs
+
+ Parameters
+ ----------
+ name : str, optional
+ id : str, optional
+ """
+ accounts = self.service.management().accounts().list().execute()
+ return _get_match(accounts, name, id, **kwargs)
+
+ def get_web_property(self, account_id=None, name=None, id=None, **kwargs):
+ """
+ Retrieve a web property given and account and property name, id, or
+ custom attribute
+
+ Parameters
+ ----------
+ account_id : str, optional
+ name : str, optional
+ id : str, optional
+ """
+ prop_store = self.service.management().webproperties()
+ kwds = {}
+ if account_id is not None:
+ kwds['accountId'] = account_id
+ prop_for_acct = prop_store.list(**kwds).execute()
+ return _get_match(prop_for_acct, name, id, **kwargs)
+
+ def get_profile(self, account_id=None, web_property_id=None, name=None,
+ id=None, **kwargs):
+
+ """
+ Retrieve the right profile for the given account, web property, and
+ profile attribute (name, id, or arbitrary parameter in kwargs)
+
+ Parameters
+ ----------
+ account_id : str, optional
+ web_property_id : str, optional
+ name : str, optional
+ id : str, optional
+ """
+ profile_store = self.service.management().profiles()
+ kwds = {}
+ if account_id is not None:
+ kwds['accountId'] = account_id
+ if web_property_id is not None:
+ kwds['webPropertyId'] = web_property_id
+ profiles = profile_store.list(**kwds).execute()
+ return _get_match(profiles, name, id, **kwargs)
+
+ def create_query(self, *args, **kwargs):
+ raise NotImplementedError()
+
+ @Substitution(extras='')
+ @Appender(_GA_READER_DOC)
+ def get_data(self, metrics, start_date, end_date=None,
+ dimensions=None, segment=None, filters=None, start_index=1,
+ max_results=10000, index_col=None, parse_dates=True,
+ keep_date_col=False, date_parser=None, na_values=None,
+ converters=None, sort=True, dayfirst=False,
+ account_name=None, account_id=None, property_name=None,
+ property_id=None, profile_name=None, profile_id=None,
+ chunksize=None):
+ if chunksize is None and max_results > 10000:
+ raise ValueError('Google API returns maximum of 10,000 rows, '
+ 'please set chunksize')
+
+ account = self.get_account(account_name, account_id)
+ web_property = self.get_web_property(account.get('id'), property_name,
+ property_id)
+ profile = self.get_profile(account.get('id'), web_property.get('id'),
+ profile_name, profile_id)
+
+ profile_id = profile.get('id')
+
+ if index_col is None and dimensions is not None:
+ if isinstance(dimensions, basestring):
+ dimensions = [dimensions]
+ index_col = _clean_index(list(dimensions), parse_dates)
+
+ def _read(start, result_size):
+ query = self.create_query(profile_id, metrics, start_date,
+ end_date=end_date, dimensions=dimensions,
+ segment=segment, filters=filters,
+ start_index=start,
+ max_results=result_size)
+
+ try:
+ rs = query.execute()
+ rows = rs.get('rows', [])
+ col_info = rs.get('columnHeaders', [])
+ return self._parse_data(rows, col_info, index_col,
+ parse_dates=parse_dates,
+ keep_date_col=keep_date_col,
+ date_parser=date_parser,
+ dayfirst=dayfirst,
+ na_values=na_values,
+ converters=converters, sort=sort)
+ except HttpError, inst:
+ raise ValueError('Google API error %s: %s' % (inst.resp.status,
+ inst._get_reason()))
+
+
+ if chunksize is None:
+ return _read(start_index, max_results)
+
+ def iterator():
+ curr_start = start_index
+
+ while curr_start < max_results:
+ yield _read(curr_start, chunksize)
+ curr_start += chunksize
+ return iterator()
+
+ def _parse_data(self, rows, col_info, index_col, parse_dates=True,
+ keep_date_col=False, date_parser=None, dayfirst=False,
+ na_values=None, converters=None, sort=True):
+ # TODO use returned column types
+ col_names = _get_col_names(col_info)
+ df = psr._read(rows, dict(index_col=index_col, parse_dates=parse_dates,
+ date_parser=date_parser, dayfirst=dayfirst,
+ na_values=na_values,
+ keep_date_col=keep_date_col,
+ converters=converters,
+ header=None, names=col_names))
+
+ if isinstance(sort, bool) and sort:
+ return df.sort_index()
+ elif isinstance(sort, (basestring, list, tuple, np.ndarray)):
+ return df.sort_index(by=sort)
+
+ return df
+
+
+class GAnalytics(GDataReader):
+
+ @Appender(_QUERY_DOC)
+ def create_query(self, profile_id, metrics, start_date, end_date=None,
+ dimensions=None, segment=None, filters=None,
+ start_index=None, max_results=10000, **kwargs):
+ qry = format_query(profile_id, metrics, start_date, end_date=end_date,
+ dimensions=dimensions, segment=segment,
+ filters=filters, start_index=start_index,
+ max_results=max_results, **kwargs)
+ try:
+ return self.service.data().ga().get(**qry)
+ except TypeError, error:
+ raise ValueError('Error making query: %s' % error)
+
+
+def format_query(ids, metrics, start_date, end_date=None, dimensions=None,
+ segment=None, filters=None, sort=None, start_index=None,
+ max_results=10000, **kwargs):
+ if isinstance(metrics, basestring):
+ metrics = [metrics]
+ met =','.join(['ga:%s' % x for x in metrics])
+
+ start_date = pd.to_datetime(start_date).strftime('%Y-%m-%d')
+ if end_date is None:
+ end_date = datetime.today()
+ end_date = pd.to_datetime(end_date).strftime('%Y-%m-%d')
+
+ qry = dict(ids='ga:%s' % str(ids),
+ metrics=met,
+ start_date=start_date,
+ end_date=end_date)
+ qry.update(kwargs)
+
+ names = ['dimensions', 'segment', 'filters', 'sort']
+ lst = [dimensions, segment, filters, sort]
+ [_maybe_add_arg(qry, n, d) for n, d in zip(names, lst)]
+
+ if start_index is not None:
+ qry['start_index'] = str(start_index)
+
+ if max_results is not None:
+ qry['max_results'] = str(max_results)
+
+ return qry
+
+def _maybe_add_arg(query, field, data):
+ if data is not None:
+ if isinstance(data, basestring):
+ data = [data]
+ data = ','.join(['ga:%s' % x for x in data])
+ query[field] = data
+
+def _get_match(obj_store, name, id, **kwargs):
+ key, val = None, None
+ if len(kwargs) > 0:
+ key = kwargs.keys()[0]
+ val = kwargs.values()[0]
+
+ if name is None and id is None and key is None:
+ return obj_store.get('items')[0]
+
+ name_ok = lambda item: name is not None and item.get('name') == name
+ id_ok = lambda item: id is not None and item.get('id') == id
+ key_ok = lambda item: key is not None and item.get(key) == val
+
+ match = None
+ if obj_store.get('items'):
+ # TODO look up gapi for faster lookup
+ for item in obj_store.get('items'):
+ if name_ok(item) or id_ok(item) or key_ok(item):
+ return item
+
+def _clean_index(index_dims, parse_dates):
+ _should_add = lambda lst: pd.Index(lst).isin(index_dims).all()
+ to_remove = []
+ to_add = []
+
+ if isinstance(parse_dates, (list, tuple, np.ndarray)):
+ for lst in parse_dates:
+ if isinstance(lst, (list, tuple, np.ndarray)):
+ if _should_add(lst):
+ to_add.append('_'.join(lst))
+ to_remove.extend(lst)
+ elif isinstance(parse_dates, dict):
+ for name, lst in parse_dates.iteritems():
+ if isinstance(lst, (list, tuple, np.ndarray)):
+ if _should_add(lst):
+ to_add.append(name)
+ to_remove.extend(lst)
+
+ index_dims = pd.Index(index_dims)
+ to_remove = pd.Index(set(to_remove))
+ to_add = pd.Index(set(to_add))
+
+ return index_dims - to_remove + to_add
+
+
+def _get_col_names(header_info):
+ return [x['name'][3:] for x in header_info]
+
+def _get_column_types(header_info):
+ return [(x['name'][3:], x['columnType']) for x in header_info]
+
+def _get_dim_names(header_info):
+ return [x['name'][3:] for x in header_info
+ if x['columnType'] == u'DIMENSION']
+
+def _get_met_names(header_info):
+ return [x['name'][3:] for x in header_info
+ if x['columnType'] == u'METRIC']
+
+def _get_data_types(header_info):
+ return [(x['name'][3:], TYPE_MAP.get(x['dataType'], object))
+ for x in header_info]
diff --git a/pandas/io/tests/test_ga.py b/pandas/io/tests/test_ga.py
new file mode 100644
index 0000000000000..09d325fa6eef1
--- /dev/null
+++ b/pandas/io/tests/test_ga.py
@@ -0,0 +1,115 @@
+import unittest
+import nose
+import httplib2
+from datetime import datetime
+
+import pandas as pd
+import pandas.core.common as com
+from pandas import DataFrame
+from pandas.util.testing import network, assert_frame_equal
+from numpy.testing.decorators import slow
+
+class TestGoogle(unittest.TestCase):
+
+ _multiprocess_can_split_ = True
+
+ @slow
+ @network
+ def test_getdata(self):
+ try:
+ from pandas.io.ga import GAnalytics, read_ga
+ from pandas.io.auth import AuthenticationConfigError
+ except ImportError:
+ raise nose.SkipTest
+
+ try:
+ end_date = datetime.now()
+ start_date = end_date - pd.offsets.Day() * 5
+ end_date = end_date.strftime('%Y-%m-%d')
+ start_date = start_date.strftime('%Y-%m-%d')
+
+ reader = GAnalytics()
+ df = reader.get_data(
+ metrics=['avgTimeOnSite', 'visitors', 'newVisits',
+ 'pageviewsPerVisit'],
+ start_date = start_date,
+ end_date = end_date,
+ dimensions=['date', 'hour'],
+ parse_dates={'ts' : ['date', 'hour']})
+
+ assert isinstance(df, DataFrame)
+ assert isinstance(df.index, pd.DatetimeIndex)
+ assert len(df) > 1
+ assert 'date' not in df
+ assert 'hour' not in df
+ assert df.index.name == 'ts'
+ assert 'avgTimeOnSite' in df
+ assert 'visitors' in df
+ assert 'newVisits' in df
+ assert 'pageviewsPerVisit' in df
+
+ df2 = read_ga(
+ metrics=['avgTimeOnSite', 'visitors', 'newVisits',
+ 'pageviewsPerVisit'],
+ start_date=start_date,
+ end_date=end_date,
+ dimensions=['date', 'hour'],
+ parse_dates={'ts' : ['date', 'hour']})
+
+ assert_frame_equal(df, df2)
+
+ except AuthenticationConfigError:
+ raise nose.SkipTest
+ except httplib2.ServerNotFoundError:
+ try:
+ h = httplib2.Http()
+ response, content = h.request("http://www.google.com")
+ raise
+ except httplib2.ServerNotFoundError:
+ raise nose.SkipTest
+
+ @slow
+ @network
+ def test_iterator(self):
+ try:
+ from pandas.io.ga import GAnalytics, read_ga
+ from pandas.io.auth import AuthenticationConfigError
+ except ImportError:
+ raise nose.SkipTest
+
+ try:
+ reader = GAnalytics()
+
+ it = reader.get_data(
+ metrics='visitors',
+ start_date='2005-1-1',
+ dimensions='date',
+ max_results=10, chunksize=5)
+
+ df1 = it.next()
+ df2 = it.next()
+
+ for df in [df1, df2]:
+ assert isinstance(df, DataFrame)
+ assert isinstance(df.index, pd.DatetimeIndex)
+ assert len(df) == 5
+ assert 'date' not in df
+ assert df.index.name == 'date'
+ assert 'visitors' in df
+
+ assert (df2.index > df1.index).all()
+
+ except AuthenticationConfigError:
+ raise nose.SkipTest
+ except httplib2.ServerNotFoundError:
+ try:
+ h = httplib2.Http()
+ response, content = h.request("http://www.google.com")
+ raise
+ except httplib2.ServerNotFoundError:
+ raise nose.SkipTest
+
+if __name__ == '__main__':
+ import nose
+ nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
+ exit=False)
| skips tests if no network or authentication config file is missing
@wesm, mainly just want a quick review of the testing suite to make sure it skips the test in the right situations.
| https://api.github.com/repos/pandas-dev/pandas/pulls/2283 | 2012-11-18T20:59:41Z | 2012-12-01T15:25:45Z | 2012-12-01T15:25:45Z | 2014-06-14T13:14:30Z |
Panelnd | diff --git a/pandas/core/api.py b/pandas/core/api.py
old mode 100644
new mode 100755
index 8cf3b7f4cbda4..6cbdae430ba0b
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -4,7 +4,6 @@
import numpy as np
from pandas.core.algorithms import factorize, match, unique, value_counts
-
from pandas.core.common import isnull, notnull, save, load
from pandas.core.categorical import Categorical, Factor
from pandas.core.format import (set_printoptions, reset_printoptions,
@@ -14,6 +13,7 @@
from pandas.core.series import Series, TimeSeries
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel
+from pandas.core.panel4d import Panel4D
from pandas.core.groupby import groupby
from pandas.core.reshape import (pivot_simple as pivot, get_dummies,
lreshape)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 0cfb4004708fa..fe44cfaa21107 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -240,11 +240,16 @@ def _multi_take_opportunity(self, tup):
def _multi_take(self, tup):
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel
+ from pandas.core.panel4d import Panel4D
if isinstance(self.obj, DataFrame):
index = self._convert_for_reindex(tup[0], axis=0)
columns = self._convert_for_reindex(tup[1], axis=1)
return self.obj.reindex(index=index, columns=columns)
+ elif isinstance(self.obj, Panel4D):
+ conv = [self._convert_for_reindex(x, axis=i)
+ for i, x in enumerate(tup)]
+ return self.obj.reindex(labels=tup[0],items=tup[1], major=tup[2], minor=tup[3])
elif isinstance(self.obj, Panel):
conv = [self._convert_for_reindex(x, axis=i)
for i, x in enumerate(tup)]
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 42adf0420db0d..4f5203b103ce7 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -6,7 +6,6 @@
import operator
import sys
import numpy as np
-
from pandas.core.common import (PandasError, _mut_exclusive,
_try_sort, _default_index, _infer_dtype)
from pandas.core.categorical import Factor
@@ -147,31 +146,45 @@ def f(self, other, axis='items'):
class Panel(NDFrame):
- _AXIS_NUMBERS = {
- 'items': 0,
- 'major_axis': 1,
- 'minor_axis': 2
- }
-
- _AXIS_ALIASES = {
- 'major': 'major_axis',
- 'minor': 'minor_axis'
- }
-
- _AXIS_NAMES = {
- 0: 'items',
- 1: 'major_axis',
- 2: 'minor_axis'
+ _AXIS_ORDERS = ['items','major_axis','minor_axis']
+ _AXIS_NUMBERS = dict([ (a,i) for i, a in enumerate(_AXIS_ORDERS) ])
+ _AXIS_ALIASES = {
+ 'major' : 'major_axis',
+ 'minor' : 'minor_axis'
}
+ _AXIS_NAMES = dict([ (i,a) for i, a in enumerate(_AXIS_ORDERS) ])
+ _AXIS_SLICEMAP = {
+ 'major_axis' : 'index',
+ 'minor_axis' : 'columns'
+ }
+ _AXIS_LEN = len(_AXIS_ORDERS)
# major
_default_stat_axis = 1
- _het_axis = 0
+
+ # info axis
+ _het_axis = 0
+ _info_axis = _AXIS_ORDERS[_het_axis]
items = lib.AxisProperty(0)
major_axis = lib.AxisProperty(1)
minor_axis = lib.AxisProperty(2)
+ @property
+ def _constructor(self):
+ return type(self)
+
+ # return the type of the slice constructor
+ _constructor_sliced = DataFrame
+
+ def _construct_axes_dict(self, axes = None):
+ """ return an axes dictionary for myself """
+ return dict([ (a,getattr(self,a)) for a in (axes or self._AXIS_ORDERS) ])
+
+ def _construct_axes_dict_for_slice(self, axes = None):
+ """ return an axes dictionary for myself """
+ return dict([ (self._AXIS_SLICEMAP[a],getattr(self,a)) for a in (axes or self._AXIS_ORDERS) ])
+
__add__ = _arith_method(operator.add, '__add__')
__sub__ = _arith_method(operator.sub, '__sub__')
__truediv__ = _arith_method(operator.truediv, '__truediv__')
@@ -209,10 +222,15 @@ def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
"""
+ self._init_data( data=data, items=items, major_axis=major_axis, minor_axis=minor_axis,
+ copy=copy, dtype=dtype)
+
+ def _init_data(self, data, copy, dtype, **kwargs):
+ """ generate ND initialization; axes are passed as required objects to __init__ """
if data is None:
data = {}
- passed_axes = [items, major_axis, minor_axis]
+ passed_axes = [ kwargs.get(a) for a in self._AXIS_ORDERS ]
axes = None
if isinstance(data, BlockManager):
if any(x is not None for x in passed_axes):
@@ -238,48 +256,47 @@ def _from_axes(cls, data, axes):
if isinstance(data, BlockManager):
return cls(data)
else:
- items, major, minor = axes
- return cls(data, items=items, major_axis=major,
- minor_axis=minor, copy=False)
+ d = dict([ (i, a) for i, a in zip(cls._AXIS_ORDERS,axes) ])
+ d['copy'] = False
+ return cls(data, **d)
def _init_dict(self, data, axes, dtype=None):
- items, major, minor = axes
+ haxis = axes.pop(self._het_axis)
- # prefilter if items passed
- if items is not None:
- items = _ensure_index(items)
- data = dict((k, v) for k, v in data.iteritems() if k in items)
+ # prefilter if haxis passed
+ if haxis is not None:
+ haxis = _ensure_index(haxis)
+ data = dict((k, v) for k, v in data.iteritems() if k in haxis)
else:
- items = Index(_try_sort(data.keys()))
+ haxis = Index(_try_sort(data.keys()))
for k, v in data.iteritems():
if isinstance(v, dict):
- data[k] = DataFrame(v)
+ data[k] = self._constructor_sliced(v)
- if major is None:
- major = _extract_axis(data, axis=0)
+ # extract axis for remaining axes & create the slicemap
+ raxes = [ self._extract_axis(self, data, axis=i) if a is None else a for i, a in enumerate(axes) ]
+ raxes_sm = self._extract_axes_for_slice(self, raxes)
- if minor is None:
- minor = _extract_axis(data, axis=1)
-
- axes = [items, major, minor]
+ # shallow copy
arrays = []
-
- item_shape = len(major), len(minor)
- for item in items:
- v = values = data.get(item)
+ reshaped_data = data.copy()
+ haxis_shape = [ len(a) for a in raxes ]
+ for h in haxis:
+ v = values = data.get(h)
if v is None:
- values = np.empty(item_shape, dtype=dtype)
+ values = np.empty(haxis_shape, dtype=dtype)
values.fill(np.nan)
- elif isinstance(v, DataFrame):
- v = v.reindex(index=major, columns=minor, copy=False)
+ elif isinstance(v, self._constructor_sliced):
+ d = raxes_sm.copy()
+ d['copy'] = False
+ v = v.reindex(**d)
if dtype is not None:
v = v.astype(dtype)
values = v.values
-
arrays.append(values)
- return self._init_arrays(arrays, items, axes)
+ return self._init_arrays(arrays, haxis, [ haxis ] + raxes)
def _init_arrays(self, arrays, arr_names, axes):
# segregates dtypes and forms blocks matching to columns
@@ -289,7 +306,7 @@ def _init_arrays(self, arrays, arr_names, axes):
@property
def shape(self):
- return len(self.items), len(self.major_axis), len(self.minor_axis)
+ return [ len(getattr(self,a)) for a in self._AXIS_ORDERS ]
@classmethod
def from_dict(cls, data, intersect=False, orient='items', dtype=None):
@@ -326,32 +343,33 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None):
elif orient != 'items': # pragma: no cover
raise ValueError('only recognize items or minor for orientation')
- data, index, columns = _homogenize_dict(data, intersect=intersect,
- dtype=dtype)
- items = Index(sorted(data.keys()))
- return cls(data, items, index, columns)
+ d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)
+ d[cls._info_axis] = Index(sorted(d['data'].keys()))
+ return cls(**d)
def __getitem__(self, key):
- if isinstance(self.items, MultiIndex):
+ if isinstance(getattr(self,self._info_axis), MultiIndex):
return self._getitem_multilevel(key)
return super(Panel, self).__getitem__(key)
def _getitem_multilevel(self, key):
- loc = self.items.get_loc(key)
+ info = getattr(self,self._info_axis)
+ loc = info.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
- new_index = self.items[loc]
+ new_index = info[loc]
result_index = _maybe_droplevels(new_index, key)
- new_values = self.values[loc, :, :]
- result = Panel(new_values,
- items=result_index,
- major_axis=self.major_axis,
- minor_axis=self.minor_axis)
+ slices = [loc] + [slice(None) for x in range(self._AXIS_LEN-1)]
+ new_values = self.values[slices]
+
+ d = self._construct_axes_dict(self._AXIS_ORDERS[1:])
+ d[self._info_axis] = result_index
+ result = self._constructor(new_values, **d)
return result
else:
return self._get_item_cache(key)
def _init_matrix(self, data, axes, dtype=None, copy=False):
- values = _prep_ndarray(data, copy=copy)
+ values = self._prep_ndarray(self, data, copy=copy)
if dtype is not None:
try:
@@ -379,9 +397,9 @@ def __array__(self, dtype=None):
return self.values
def __array_wrap__(self, result):
- return self._constructor(result, items=self.items,
- major_axis=self.major_axis,
- minor_axis=self.minor_axis, copy=False)
+ d = self._construct_axes_dict(self._AXIS_ORDERS)
+ d['copy'] = False
+ return self._constructor(result, **d)
#----------------------------------------------------------------------
# Magic methods
@@ -389,37 +407,26 @@ def __array_wrap__(self, result):
def __repr__(self):
class_name = str(self.__class__)
- I, N, K = len(self.items), len(self.major_axis), len(self.minor_axis)
+ shape = self.shape
+ dims = 'Dimensions: %s' % ' x '.join([ "%d (%s)" % (s, a) for a,s in zip(self._AXIS_ORDERS,shape) ])
- dims = 'Dimensions: %d (items) x %d (major) x %d (minor)' % (I, N, K)
-
- if len(self.major_axis) > 0:
- major = 'Major axis: %s to %s' % (self.major_axis[0],
- self.major_axis[-1])
- else:
- major = 'Major axis: None'
-
- if len(self.minor_axis) > 0:
- minor = 'Minor axis: %s to %s' % (self.minor_axis[0],
- self.minor_axis[-1])
- else:
- minor = 'Minor axis: None'
-
- if len(self.items) > 0:
- items = 'Items: %s to %s' % (self.items[0], self.items[-1])
- else:
- items = 'Items: None'
-
- output = '%s\n%s\n%s\n%s\n%s' % (class_name, dims, items, major, minor)
+ def axis_pretty(a):
+ v = getattr(self,a)
+ if len(v) > 0:
+ return '%s axis: %s to %s' % (a.capitalize(),v[0],v[-1])
+ else:
+ return '%s axis: None' % a.capitalize()
+
+ output = '\n'.join([class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])
return output
def __iter__(self):
- return iter(self.items)
+ return iter(getattr(self,self._info_axis))
def iteritems(self):
- for item in self.items:
- yield item, self[item]
+ for h in getattr(self,self._info_axis):
+ yield h, self[h]
# Name that won't get automatically converted to items by 2to3. items is
# already in use for the first axis.
@@ -443,10 +450,6 @@ def _get_plane_axes(self, axis):
return index, columns
- @property
- def _constructor(self):
- return type(self)
-
# Fancy indexing
_ix = None
@@ -516,7 +519,7 @@ def _get_values(self):
#----------------------------------------------------------------------
# Getting and setting elements
- def get_value(self, item, major, minor):
+ def get_value(self, *args):
"""
Quickly retrieve single value at (item, major, minor) location
@@ -530,11 +533,14 @@ def get_value(self, item, major, minor):
-------
value : scalar value
"""
+ # require an arg for each axis
+ assert(len(args) == self._AXIS_LEN)
+
# hm, two layers to the onion
- frame = self._get_item_cache(item)
- return frame.get_value(major, minor)
+ frame = self._get_item_cache(args[0])
+ return frame.get_value(*args[1:])
- def set_value(self, item, major, minor, value):
+ def set_value(self, *args):
"""
Quickly set single value at (item, major, minor) location
@@ -551,30 +557,35 @@ def set_value(self, item, major, minor, value):
If label combo is contained, will be reference to calling Panel,
otherwise a new object
"""
+ # require an arg for each axis and the value
+ assert(len(args) == self._AXIS_LEN+1)
+
try:
- frame = self._get_item_cache(item)
- frame.set_value(major, minor, value)
+ frame = self._get_item_cache(args[0])
+ frame.set_value(*args[1:])
return self
except KeyError:
- ax1, ax2, ax3 = self._expand_axes((item, major, minor))
- result = self.reindex(items=ax1, major=ax2, minor=ax3, copy=False)
+ axes = self._expand_axes(args)
+ d = dict([ (a,ax) for a,ax in zip(self._AXIS_ORDERS,axes) ])
+ d['copy'] = False
+ result = self.reindex(**d)
- likely_dtype = com._infer_dtype(value)
- made_bigger = not np.array_equal(ax1, self.items)
+ likely_dtype = com._infer_dtype(args[-1])
+ made_bigger = not np.array_equal(axes[0], getattr(self,self._info_axis))
# how to make this logic simpler?
if made_bigger:
- com._possibly_cast_item(result, item, likely_dtype)
+ com._possibly_cast_item(result, args[0], likely_dtype)
- return result.set_value(item, major, minor, value)
+ return result.set_value(*args)
def _box_item_values(self, key, values):
- return DataFrame(values, index=self.major_axis,
- columns=self.minor_axis)
+ d = self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])
+ return self._constructor_sliced(values, **d)
def __getattr__(self, name):
"""After regular attribute access, try looking up the name of an item.
This allows simpler access to items for interactive use."""
- if name in self.items:
+ if name in getattr(self,self._info_axis):
return self[name]
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, name))
@@ -584,22 +595,21 @@ def _slice(self, slobj, axis=0):
return self._constructor(new_data)
def __setitem__(self, key, value):
- _, N, K = self.shape
- if isinstance(value, DataFrame):
- value = value.reindex(index=self.major_axis,
- columns=self.minor_axis)
+ shape = tuple(self.shape)
+ if isinstance(value, self._constructor_sliced):
+ value = value.reindex(**self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
mat = value.values
elif isinstance(value, np.ndarray):
- assert(value.shape == (N, K))
+ assert(value.shape == shape[1:])
mat = np.asarray(value)
elif np.isscalar(value):
dtype = _infer_dtype(value)
- mat = np.empty((N, K), dtype=dtype)
+ mat = np.empty(shape[1:], dtype=dtype)
mat.fill(value)
else:
raise TypeError('Cannot set item of type: %s' % str(type(value)))
- mat = mat.reshape((1, N, K))
+ mat = mat.reshape(tuple([1]) + shape[1:])
NDFrame._set_item(self, key, mat)
def pop(self, item):
@@ -660,11 +670,11 @@ def conform(self, frame, axis='items'):
-------
DataFrame
"""
- index, columns = self._get_plane_axes(axis)
- return frame.reindex(index=index, columns=columns)
+ axes = self._get_plane_axes(axis)
+ return frame.reindex(**self._extract_axes_for_slice(self, axes))
- def reindex(self, major=None, items=None, minor=None, method=None,
- major_axis=None, minor_axis=None, copy=True):
+ def reindex(self, major=None, minor=None, method=None,
+ major_axis=None, minor_axis=None, copy=True, **kwargs):
"""
Conform panel to new axis or axes
@@ -691,19 +701,24 @@ def reindex(self, major=None, items=None, minor=None, method=None,
major = _mut_exclusive(major, major_axis)
minor = _mut_exclusive(minor, minor_axis)
+ al = self._AXIS_LEN
- if (method is None and not self._is_mixed_type and
- com._count_not_none(items, major, minor) == 3):
- return self._reindex_multi(items, major, minor)
+ # only allowing multi-index on Panel (and not > dims)
+ if (method is None and not self._is_mixed_type and al <= 3):
+ items = kwargs.get('items')
+ if com._count_not_none(items, major, minor) == 3:
+ return self._reindex_multi(items, major, minor)
if major is not None:
- result = result._reindex_axis(major, method, 1, copy)
+ result = result._reindex_axis(major, method, al-2, copy)
if minor is not None:
- result = result._reindex_axis(minor, method, 2, copy)
+ result = result._reindex_axis(minor, method, al-1, copy)
- if items is not None:
- result = result._reindex_axis(items, method, 0, copy)
+ for i, a in enumerate(self._AXIS_ORDERS[0:al-2]):
+ a = kwargs.get(a)
+ if a is not None:
+ result = result._reindex_axis(a, method, i, copy)
if result is self and copy:
raise ValueError('Must specify at least one axis')
@@ -768,8 +783,7 @@ def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True):
return self._reindex_axis(labels, method, axis, copy)
def reindex_like(self, other, method=None):
- """
- Reindex Panel to match indices of another Panel
+ """ return an object with matching indicies to myself
Parameters
----------
@@ -780,9 +794,9 @@ def reindex_like(self, other, method=None):
-------
reindexed : Panel
"""
- # todo: object columns
- return self.reindex(major=other.major_axis, items=other.items,
- minor=other.minor_axis, method=method)
+ d = other._construct_axes_dict()
+ d['method'] = method
+ return self.reindex(**d)
def dropna(self, axis=0, how='any'):
"""
@@ -826,8 +840,8 @@ def _combine(self, other, func, axis=0):
return self._combine_frame(other, func, axis=axis)
elif np.isscalar(other):
new_values = func(self.values, other)
- return self._constructor(new_values, self.items, self.major_axis,
- self.minor_axis)
+ d = self._construct_axes_dict()
+ return self._constructor(new_values, **d)
def __neg__(self):
return -1 * self
@@ -924,7 +938,7 @@ def major_xs(self, key, copy=True):
y : DataFrame
index -> minor axis, columns -> items
"""
- return self.xs(key, axis=1, copy=copy)
+ return self.xs(key, axis=self._AXIS_LEN-2, copy=copy)
def minor_xs(self, key, copy=True):
"""
@@ -942,7 +956,7 @@ def minor_xs(self, key, copy=True):
y : DataFrame
index -> major axis, columns -> items
"""
- return self.xs(key, axis=2, copy=copy)
+ return self.xs(key, axis=self._AXIS_LEN-1, copy=copy)
def xs(self, key, axis=1, copy=True):
"""
@@ -956,7 +970,7 @@ def xs(self, key, axis=1, copy=True):
Returns
-------
- y : DataFrame
+ y : ndim(self)-1
"""
if axis == 0:
data = self[key]
@@ -967,7 +981,7 @@ def xs(self, key, axis=1, copy=True):
self._consolidate_inplace()
axis_number = self._get_axis_number(axis)
new_data = self._data.xs(key, axis=axis_number, copy=copy)
- return DataFrame(new_data)
+ return self._constructor_sliced(new_data)
def _ixs(self, i, axis=0):
# for compatibility with .ix indexing
@@ -1010,15 +1024,14 @@ def swapaxes(self, axis1='major', axis2='minor', copy=True):
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k))
- for k in range(3))
+ for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes)
- def transpose(self, items='items', major='major', minor='minor',
- copy=False):
+ def transpose(self, *args, **kwargs):
"""
Permute the dimensions of the Panel
@@ -1040,16 +1053,27 @@ def transpose(self, items='items', major='major', minor='minor',
-------
y : Panel (new object)
"""
- i, j, k = [self._get_axis_number(x) for x in [items, major, minor]]
- if i == j or i == k or j == k:
- raise ValueError('Must specify 3 unique axes')
-
- new_axes = [self._get_axis(x) for x in [i, j, k]]
- new_values = self.values.transpose((i, j, k))
- if copy:
+ # construct the args
+ args = list(args)
+ for a in self._AXIS_ORDERS:
+ if not a in kwargs:
+ try:
+ kwargs[a] = args.pop(0)
+ except (IndexError):
+ raise ValueError("not enough arguments specified to transpose!")
+
+ axes = [self._get_axis_number(kwargs[a]) for a in self._AXIS_ORDERS]
+
+ # we must have unique axes
+ if len(axes) != len(set(axes)):
+ raise ValueError('Must specify %s unique axes' % self._AXIS_LEN)
+
+ new_axes = dict([ (a,self._get_axis(x)) for a, x in zip(self._AXIS_ORDERS,axes)])
+ new_values = self.values.transpose(tuple(axes))
+ if kwargs.get('copy') or (len(args) and args[-1]):
new_values = new_values.copy()
- return self._constructor(new_values, *new_axes)
+ return self._constructor(new_values, **new_axes)
def to_frame(self, filter_observations=True):
"""
@@ -1140,20 +1164,24 @@ def _reduce(self, op, axis=0, skipna=True):
result = f(self.values)
- index, columns = self._get_plane_axes(axis_name)
- if axis_name != 'items':
+ axes = self._get_plane_axes(axis_name)
+ if result.ndim == 2 and axis_name != self._info_axis:
result = result.T
- return DataFrame(result, index=index, columns=columns)
+ return self._constructor_sliced(result, **self._extract_axes_for_slice(self, axes))
def _wrap_result(self, result, axis):
axis = self._get_axis_name(axis)
- index, columns = self._get_plane_axes(axis)
-
- if axis != 'items':
+ axes = self._get_plane_axes(axis)
+ if result.ndim == 2 and axis != self._info_axis:
result = result.T
- return DataFrame(result, index=index, columns=columns)
+ # do we have reduced dimensionality?
+ if self.ndim == result.ndim:
+ return self._constructor(result, **self._construct_axes_dict())
+ elif self.ndim == result.ndim+1:
+ return self._constructor_sliced(result, **self._extract_axes_for_slice(self, axes))
+ raise PandasError("invalid _wrap_result [self->%s] [result->%s]" % (self.ndim,result.ndim))
def count(self, axis='major'):
"""
@@ -1381,71 +1409,83 @@ def _get_join_index(self, other, how):
join_minor = self.minor_axis.union(other.minor_axis)
return join_major, join_minor
-WidePanel = Panel
-LongPanel = DataFrame
-
-
-def _prep_ndarray(values, copy=True):
- if not isinstance(values, np.ndarray):
- values = np.asarray(values)
- # NumPy strings are a pain, convert to object
- if issubclass(values.dtype.type, basestring):
- values = np.array(values, dtype=object, copy=True)
- else:
- if copy:
- values = values.copy()
- assert(values.ndim == 3)
- return values
-
-
-def _homogenize_dict(frames, intersect=True, dtype=None):
- """
- Conform set of DataFrame-like objects to either an intersection
- of indices / columns or a union.
-
- Parameters
- ----------
- frames : dict
- intersect : boolean, default True
+ # miscellaneous data creation
+ @staticmethod
+ def _extract_axes(self, data, axes, **kwargs):
+ """ return a list of the axis indices """
+ return [ self._extract_axis(self, data, axis=i, **kwargs) for i, a in enumerate(axes) ]
+
+ @staticmethod
+ def _extract_axes_for_slice(self, axes):
+ """ return the slice dictionary for these axes """
+ return dict([ (self._AXIS_SLICEMAP[i], a) for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN-len(axes):],axes) ])
+
+ @staticmethod
+ def _prep_ndarray(self, values, copy=True):
+ if not isinstance(values, np.ndarray):
+ values = np.asarray(values)
+ # NumPy strings are a pain, convert to object
+ if issubclass(values.dtype.type, basestring):
+ values = np.array(values, dtype=object, copy=True)
+ else:
+ if copy:
+ values = values.copy()
+ assert(values.ndim == self._AXIS_LEN)
+ return values
- Returns
- -------
- dict of aligned frames, index, columns
- """
- result = {}
+ @staticmethod
+ def _homogenize_dict(self, frames, intersect=True, dtype=None):
+ """
+ Conform set of _constructor_sliced-like objects to either an intersection
+ of indices / columns or a union.
+
+ Parameters
+ ----------
+ frames : dict
+ intersect : boolean, default True
+
+ Returns
+ -------
+ dict of aligned results & indices
+ """
+ result = {}
- adj_frames = {}
- for k, v in frames.iteritems():
- if isinstance(v, dict):
- adj_frames[k] = DataFrame(v)
- else:
- adj_frames[k] = v
+ adj_frames = {}
+ for k, v in frames.iteritems():
+ if isinstance(v, dict):
+ adj_frames[k] = self._constructor_sliced(v)
+ else:
+ adj_frames[k] = v
- index = _extract_axis(adj_frames, axis=0, intersect=intersect)
- columns = _extract_axis(adj_frames, axis=1, intersect=intersect)
+ axes = self._AXIS_ORDERS[1:]
+ axes_dict = dict([ (a,ax) for a,ax in zip(axes,self._extract_axes(self, adj_frames, axes, intersect=intersect)) ])
- for key, frame in adj_frames.iteritems():
- if frame is not None:
- result[key] = frame.reindex(index=index, columns=columns,
- copy=False)
- else:
- result[key] = None
+ reindex_dict = dict([ (self._AXIS_SLICEMAP[a],axes_dict[a]) for a in axes ])
+ reindex_dict['copy'] = False
+ for key, frame in adj_frames.iteritems():
+ if frame is not None:
+ result[key] = frame.reindex(**reindex_dict)
+ else:
+ result[key] = None
- return result, index, columns
+ axes_dict['data'] = result
+ return axes_dict
+ @staticmethod
+ def _extract_axis(self, data, axis=0, intersect=False):
-def _extract_axis(data, axis=0, intersect=False):
- if len(data) == 0:
- index = Index([])
- elif len(data) > 0:
- raw_lengths = []
- indexes = []
index = None
+ if len(data) == 0:
+ index = Index([])
+ elif len(data) > 0:
+ raw_lengths = []
+ indexes = []
+
have_raw_arrays = False
have_frames = False
for v in data.values():
- if isinstance(v, DataFrame):
+ if isinstance(v, self._constructor_sliced):
have_frames = True
indexes.append(v._get_axis(axis))
elif v is not None:
@@ -1459,7 +1499,7 @@ def _extract_axis(data, axis=0, intersect=False):
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('ndarrays must match shape on axis %d' % axis)
-
+
if have_frames:
assert(lengths[0] == len(index))
else:
@@ -1468,7 +1508,10 @@ def _extract_axis(data, axis=0, intersect=False):
if index is None:
index = Index([])
- return _ensure_index(index)
+ return _ensure_index(index)
+
+WidePanel = Panel
+LongPanel = DataFrame
def _monotonic(arr):
diff --git a/pandas/core/panel4d.py b/pandas/core/panel4d.py
new file mode 100755
index 0000000000000..504111bef5414
--- /dev/null
+++ b/pandas/core/panel4d.py
@@ -0,0 +1,112 @@
+""" Panel4D: a 4-d dict like collection of panels """
+
+from pandas.core.panel import Panel
+import pandas.lib as lib
+
+
+class Panel4D(Panel):
+ _AXIS_ORDERS = ['labels','items','major_axis','minor_axis']
+ _AXIS_NUMBERS = dict([ (a,i) for i, a in enumerate(_AXIS_ORDERS) ])
+ _AXIS_ALIASES = {
+ 'major' : 'major_axis',
+ 'minor' : 'minor_axis'
+ }
+ _AXIS_NAMES = dict([ (i,a) for i, a in enumerate(_AXIS_ORDERS) ])
+ _AXIS_SLICEMAP = {
+ 'items' : 'items',
+ 'major_axis' : 'major_axis',
+ 'minor_axis' : 'minor_axis'
+ }
+ _AXIS_LEN = len(_AXIS_ORDERS)
+
+ # major
+ _default_stat_axis = 2
+
+ # info axis
+ _het_axis = 0
+ _info_axis = _AXIS_ORDERS[_het_axis]
+
+ labels = lib.AxisProperty(0)
+ items = lib.AxisProperty(1)
+ major_axis = lib.AxisProperty(2)
+ minor_axis = lib.AxisProperty(3)
+
+ _constructor_sliced = Panel
+
+ def __init__(self, data=None, labels=None, items=None, major_axis=None, minor_axis=None, copy=False, dtype=None):
+ """
+ Represents a 4 dimensional structured data set
+
+ Parameters
+ ----------
+ data : ndarray (labels x items x major x minor), or dict of Panels
+
+ labels : Index or array-like : axis=0
+ items : Index or array-like : axis=1
+ major_axis : Index or array-like: axis=2
+ minor_axis : Index or array-like: axis=3
+
+ dtype : dtype, default None
+ Data type to force, otherwise infer
+ copy : boolean, default False
+ Copy data from inputs. Only affects DataFrame / 2d ndarray input
+ """
+ self._init_data( data=data, labels=labels, items=items, major_axis=major_axis, minor_axis=minor_axis,
+ copy=copy, dtype=dtype)
+
+ def _get_plane_axes(self, axis):
+ axis = self._get_axis_name(axis)
+
+ if axis == 'major_axis':
+ items = self.labels
+ major = self.items
+ minor = self.minor_axis
+ elif axis == 'minor_axis':
+ items = self.labels
+ major = self.items
+ minor = self.major_axis
+ elif axis == 'items':
+ items = self.labels
+ major = self.major_axis
+ minor = self.minor_axis
+ elif axis == 'labels':
+ items = self.items
+ major = self.major_axis
+ minor = self.minor_axis
+
+ return items, major, minor
+
+ def _combine(self, other, func, axis=0):
+ if isinstance(other, Panel4D):
+ return self._combine_panel4d(other, func)
+ return super(Panel4D, self)._combine(other, func, axis=axis)
+
+ def _combine_panel4d(self, other, func):
+ labels = self.labels + other.labels
+ items = self.items + other.items
+ major = self.major_axis + other.major_axis
+ minor = self.minor_axis + other.minor_axis
+
+ # could check that everything's the same size, but forget it
+ this = self.reindex(labels=labels, items=items, major=major, minor=minor)
+ other = other.reindex(labels=labels, items=items, major=major, minor=minor)
+
+ result_values = func(this.values, other.values)
+
+ return self._constructor(result_values, labels, items, major, minor)
+
+ def join(self, other, how='left', lsuffix='', rsuffix=''):
+ if isinstance(other, Panel4D):
+ join_major, join_minor = self._get_join_index(other, how)
+ this = self.reindex(major=join_major, minor=join_minor)
+ other = other.reindex(major=join_major, minor=join_minor)
+ merged_data = this._data.merge(other._data, lsuffix, rsuffix)
+ return self._constructor(merged_data)
+ return super(Panel4D, self).join(other=other,how=how,lsuffix=lsuffix,rsuffix=rsuffix)
+
+ ### remove operations ####
+ def to_frame(self, *args, **kwargs):
+ raise NotImplementedError
+ def to_excel(self, *args, **kwargs):
+ raise NotImplementedError
+
diff --git a/pandas/core/panelnd.py b/pandas/core/panelnd.py
new file mode 100644
index 0000000000000..e4638750aa1b2
--- /dev/null
+++ b/pandas/core/panelnd.py
@@ -0,0 +1,122 @@
+""" Factory methods to create N-D panels """
+
+import pandas
+from pandas.core.panel import Panel
+import pandas.lib as lib
+
+def create_nd_panel_factory(klass_name, axis_orders, axis_slices, slicer, axis_aliases = None, stat_axis = 2):
+ """ manufacture an n-d class:
+
+ parameters
+ ----------
+ klass_name : the klass name
+ axis_orders : the names of the axes in order (highest to lowest)
+ axis_slices : a dictionary that defines how the axes map to the sliced axis
+ slicer : the class representing a slice of this panel
+ axis_aliases: a dictionary defining aliases for various axes
+ default = { major : major_axis, minor : minor_axis }
+ stat_axis : the default statistic axis
+ default = 2
+ het_axis : the info axis
+
+
+ returns
+ -------
+ a class object representing this panel
+
+
+ """
+
+ # build the klass
+ klass = type(klass_name, (slicer,),{})
+
+ # add the class variables
+ klass._AXIS_ORDERS = axis_orders
+ klass._AXIS_NUMBERS = dict([ (a,i) for i, a in enumerate(axis_orders) ])
+ klass._AXIS_ALIASES = axis_aliases or dict()
+ klass._AXIS_NAMES = dict([ (i,a) for i, a in enumerate(axis_orders) ])
+ klass._AXIS_SLICEMAP = axis_slices
+ klass._AXIS_LEN = len(axis_orders)
+ klass._default_stat_axis = stat_axis
+ klass._het_axis = 0
+ klass._info_axis = axis_orders[klass._het_axis]
+ klass._constructor_sliced = slicer
+
+ # add the axes
+ for i, a in enumerate(axis_orders):
+ setattr(klass,a,lib.AxisProperty(i))
+
+ # define the __init__
+ def __init__(self, *args, **kwargs):
+ if not (kwargs.get('data') or len(args)):
+ raise Exception("must supply at least a data argument to [%s]" % klass_name)
+ if 'copy' not in kwargs:
+ kwargs['copy'] = False
+ if 'dtype' not in kwargs:
+ kwargs['dtype'] = None
+ self._init_data( *args, **kwargs)
+ klass.__init__ = __init__
+
+ # define _get_place_axes
+ def _get_plane_axes(self, axis):
+ axis = self._get_axis_name(axis)
+ index = self._AXIS_ORDERS.index(axis)
+
+ planes = []
+ if index:
+ planes.extend(self._AXIS_ORDERS[0:index])
+ if index != self._AXIS_LEN:
+ planes.extend(self._AXIS_ORDERS[index:])
+
+ return planes
+ klass._get_plane_axes
+
+ # remove these operations
+ def to_frame(self, *args, **kwargs):
+ raise NotImplementedError
+ klass.to_frame = to_frame
+ def to_excel(self, *args, **kwargs):
+ raise NotImplementedError
+ klass.to_excel = to_excel
+
+ return klass
+
+
+if __name__ == '__main__':
+
+ # create a sample
+ from pandas.util import testing
+ print pandas.__version__
+
+ # create a 4D
+ Panel4DNew = create_nd_panel_factory(
+ klass_name = 'Panel4DNew',
+ axis_orders = ['labels1','items1','major_axis','minor_axis'],
+ axis_slices = { 'items1' : 'items', 'major_axis' : 'major_axis', 'minor_axis' : 'minor_axis' },
+ slicer = Panel,
+ axis_aliases = { 'major' : 'major_axis', 'minor' : 'minor_axis' },
+ stat_axis = 2)
+
+ p4dn = Panel4DNew(dict(L1 = testing.makePanel(), L2 = testing.makePanel()))
+ print "creating a 4-D Panel"
+ print p4dn, "\n"
+
+ # create a 5D
+ Panel5DNew = create_nd_panel_factory(
+ klass_name = 'Panel5DNew',
+ axis_orders = [ 'cool1', 'labels1','items1','major_axis','minor_axis'],
+ axis_slices = { 'labels1' : 'labels1', 'items1' : 'items', 'major_axis' : 'major_axis', 'minor_axis' : 'minor_axis' },
+ slicer = Panel4DNew,
+ axis_aliases = { 'major' : 'major_axis', 'minor' : 'minor_axis' },
+ stat_axis = 2)
+
+ p5dn = Panel5DNew(dict(C1 = p4dn))
+
+ print "creating a 5-D Panel"
+ print p5dn, "\n"
+
+ print "Slicing p5dn"
+ print p5dn.ix['C1',:,:,0:3,:], "\n"
+
+ print "Transposing p5dn"
+ print p5dn.transpose(1,2,3,4,0), "\n"
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
old mode 100644
new mode 100755
diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py
new file mode 100755
index 0000000000000..40bd84044dbdd
--- /dev/null
+++ b/pandas/tests/test_panel4d.py
@@ -0,0 +1,1049 @@
+from datetime import datetime
+import os
+import operator
+import unittest
+import nose
+
+import numpy as np
+
+from pandas import DataFrame, Index, isnull, notnull, pivot, MultiIndex
+from pandas.core.datetools import bday
+from pandas.core.frame import group_agg
+from pandas.core.panel import Panel
+from pandas.core.panel4d import Panel4D
+from pandas.core.series import remove_na
+import pandas.core.common as com
+import pandas.core.panel as panelmod
+from pandas.util import py3compat
+from pandas.io.parsers import (ExcelFile, ExcelWriter)
+
+from pandas.util.testing import (assert_panel_equal,
+ assert_panel4d_equal,
+ assert_frame_equal,
+ assert_series_equal,
+ assert_almost_equal)
+import pandas.util.testing as tm
+
+def add_nans(panel4d):
+ for l, label in enumerate(panel4d.labels):
+ panel = panel4d[label]
+ tm.add_nans(panel)
+
+class SafeForLongAndSparse(object):
+
+ def test_repr(self):
+ foo = repr(self.panel4d)
+
+ def test_iter(self):
+ tm.equalContents(list(self.panel4d), self.panel4d.labels)
+
+ def test_count(self):
+ f = lambda s: notnull(s).sum()
+ self._check_stat_op('count', f, obj=self.panel4d, has_skipna=False)
+
+ def test_sum(self):
+ self._check_stat_op('sum', np.sum)
+
+ def test_mean(self):
+ self._check_stat_op('mean', np.mean)
+
+ def test_prod(self):
+ self._check_stat_op('prod', np.prod)
+
+ def test_median(self):
+ def wrapper(x):
+ if isnull(x).any():
+ return np.nan
+ return np.median(x)
+
+ self._check_stat_op('median', wrapper)
+
+ def test_min(self):
+ self._check_stat_op('min', np.min)
+
+ def test_max(self):
+ self._check_stat_op('max', np.max)
+
+ def test_skew(self):
+ from scipy.stats import skew
+ def this_skew(x):
+ if len(x) < 3:
+ return np.nan
+ return skew(x, bias=False)
+ self._check_stat_op('skew', this_skew)
+
+ # def test_mad(self):
+ # f = lambda x: np.abs(x - x.mean()).mean()
+ # self._check_stat_op('mad', f)
+
+ def test_var(self):
+ def alt(x):
+ if len(x) < 2:
+ return np.nan
+ return np.var(x, ddof=1)
+ self._check_stat_op('var', alt)
+
+ def test_std(self):
+ def alt(x):
+ if len(x) < 2:
+ return np.nan
+ return np.std(x, ddof=1)
+ self._check_stat_op('std', alt)
+
+ # def test_skew(self):
+ # from scipy.stats import skew
+
+ # def alt(x):
+ # if len(x) < 3:
+ # return np.nan
+ # return skew(x, bias=False)
+
+ # self._check_stat_op('skew', alt)
+
+ def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
+ if obj is None:
+ obj = self.panel4d
+
+ # # set some NAs
+ # obj.ix[5:10] = np.nan
+ # obj.ix[15:20, -2:] = np.nan
+
+ f = getattr(obj, name)
+
+ if has_skipna:
+ def skipna_wrapper(x):
+ nona = remove_na(x)
+ if len(nona) == 0:
+ return np.nan
+ return alternative(nona)
+
+ def wrapper(x):
+ return alternative(np.asarray(x))
+
+ for i in range(obj.ndim):
+ result = f(axis=i, skipna=False)
+ assert_panel_equal(result, obj.apply(wrapper, axis=i))
+ else:
+ skipna_wrapper = alternative
+ wrapper = alternative
+
+ for i in range(obj.ndim):
+ result = f(axis=i)
+ assert_panel_equal(result, obj.apply(skipna_wrapper, axis=i))
+
+ self.assertRaises(Exception, f, axis=obj.ndim)
+
+class SafeForSparse(object):
+
+ @classmethod
+ def assert_panel_equal(cls, x, y):
+ assert_panel_equal(x, y)
+
+ @classmethod
+ def assert_panel4d_equal(cls, x, y):
+ assert_panel4d_equal(x, y)
+
+ def test_get_axis(self):
+ assert(self.panel4d._get_axis(0) is self.panel4d.labels)
+ assert(self.panel4d._get_axis(1) is self.panel4d.items)
+ assert(self.panel4d._get_axis(2) is self.panel4d.major_axis)
+ assert(self.panel4d._get_axis(3) is self.panel4d.minor_axis)
+
+ def test_set_axis(self):
+ new_labels = Index(np.arange(len(self.panel4d.labels)))
+ new_items = Index(np.arange(len(self.panel4d.items)))
+ new_major = Index(np.arange(len(self.panel4d.major_axis)))
+ new_minor = Index(np.arange(len(self.panel4d.minor_axis)))
+
+ # ensure propagate to potentially prior-cached items too
+ label = self.panel4d['l1']
+ self.panel4d.labels = new_labels
+
+ if hasattr(self.panel4d, '_item_cache'):
+ self.assert_('l1' not in self.panel4d._item_cache)
+ self.assert_(self.panel4d.labels is new_labels)
+
+ self.panel4d.major_axis = new_major
+ self.assert_(self.panel4d[0].major_axis is new_major)
+ self.assert_(self.panel4d.major_axis is new_major)
+
+ self.panel4d.minor_axis = new_minor
+ self.assert_(self.panel4d[0].minor_axis is new_minor)
+ self.assert_(self.panel4d.minor_axis is new_minor)
+
+ def test_get_axis_number(self):
+ self.assertEqual(self.panel4d._get_axis_number('labels'), 0)
+ self.assertEqual(self.panel4d._get_axis_number('items'), 1)
+ self.assertEqual(self.panel4d._get_axis_number('major'), 2)
+ self.assertEqual(self.panel4d._get_axis_number('minor'), 3)
+
+ def test_get_axis_name(self):
+ self.assertEqual(self.panel4d._get_axis_name(0), 'labels')
+ self.assertEqual(self.panel4d._get_axis_name(1), 'items')
+ self.assertEqual(self.panel4d._get_axis_name(2), 'major_axis')
+ self.assertEqual(self.panel4d._get_axis_name(3), 'minor_axis')
+
+ #def test_get_plane_axes(self):
+ # # what to do here?
+
+ # index, columns = self.panel._get_plane_axes('items')
+ # index, columns = self.panel._get_plane_axes('major_axis')
+ # index, columns = self.panel._get_plane_axes('minor_axis')
+ # index, columns = self.panel._get_plane_axes(0)
+
+ def test_truncate(self):
+ raise nose.SkipTest
+
+ #dates = self.panel.major_axis
+ #start, end = dates[1], dates[5]
+
+ #trunced = self.panel.truncate(start, end, axis='major')
+ #expected = self.panel['ItemA'].truncate(start, end)
+
+ #assert_frame_equal(trunced['ItemA'], expected)
+
+ #trunced = self.panel.truncate(before=start, axis='major')
+ #expected = self.panel['ItemA'].truncate(before=start)
+
+ #assert_frame_equal(trunced['ItemA'], expected)
+
+ #trunced = self.panel.truncate(after=end, axis='major')
+ #expected = self.panel['ItemA'].truncate(after=end)
+
+ #assert_frame_equal(trunced['ItemA'], expected)
+
+ # XXX test other axes
+
+ def test_arith(self):
+ self._test_op(self.panel4d, operator.add)
+ self._test_op(self.panel4d, operator.sub)
+ self._test_op(self.panel4d, operator.mul)
+ self._test_op(self.panel4d, operator.truediv)
+ self._test_op(self.panel4d, operator.floordiv)
+ self._test_op(self.panel4d, operator.pow)
+
+ self._test_op(self.panel4d, lambda x, y: y + x)
+ self._test_op(self.panel4d, lambda x, y: y - x)
+ self._test_op(self.panel4d, lambda x, y: y * x)
+ self._test_op(self.panel4d, lambda x, y: y / x)
+ self._test_op(self.panel4d, lambda x, y: y ** x)
+
+ self.assertRaises(Exception, self.panel4d.__add__, self.panel4d['l1'])
+
+ @staticmethod
+ def _test_op(panel4d, op):
+ result = op(panel4d, 1)
+ assert_panel_equal(result['l1'], op(panel4d['l1'], 1))
+
+ def test_keys(self):
+ tm.equalContents(self.panel4d.keys(), self.panel4d.labels)
+
+ def test_iteritems(self):
+ """Test panel4d.iteritems(), aka panel4d.iterkv()"""
+ # just test that it works
+ for k, v in self.panel4d.iterkv():
+ pass
+
+ self.assertEqual(len(list(self.panel4d.iterkv())),
+ len(self.panel4d.labels))
+
+ def test_combinePanel4d(self):
+ result = self.panel4d.add(self.panel4d)
+ self.assert_panel4d_equal(result, self.panel4d * 2)
+
+ def test_neg(self):
+ self.assert_panel4d_equal(-self.panel4d, self.panel4d * -1)
+
+ def test_select(self):
+ p = self.panel4d
+
+ # select labels
+ result = p.select(lambda x: x in ('l1', 'l3'), axis='labels')
+ expected = p.reindex(labels=['l1','l3'])
+ self.assert_panel4d_equal(result, expected)
+
+ # select items
+ result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
+ expected = p.reindex(items=['ItemA', 'ItemC'])
+ self.assert_panel4d_equal(result, expected)
+
+ # select major_axis
+ result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
+ new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
+ expected = p.reindex(major=new_major)
+ self.assert_panel4d_equal(result, expected)
+
+ # select minor_axis
+ result = p.select(lambda x: x in ('D', 'A'), axis=3)
+ expected = p.reindex(minor=['A', 'D'])
+ self.assert_panel4d_equal(result, expected)
+
+ # corner case, empty thing
+ result = p.select(lambda x: x in ('foo',), axis='items')
+ self.assert_panel4d_equal(result, p.reindex(items=[]))
+
+ def test_get_value(self):
+ for item in self.panel.items:
+ for mjr in self.panel.major_axis[::2]:
+ for mnr in self.panel.minor_axis:
+ result = self.panel.get_value(item, mjr, mnr)
+ expected = self.panel[item][mnr][mjr]
+ assert_almost_equal(result, expected)
+
+ def test_abs(self):
+ result = self.panel4d.abs()
+ expected = np.abs(self.panel4d)
+ self.assert_panel4d_equal(result, expected)
+
+ p = self.panel4d['l1']
+ result = p.abs()
+ expected = np.abs(p)
+ assert_panel_equal(result, expected)
+
+ df = p['ItemA']
+ result = df.abs()
+ expected = np.abs(df)
+ assert_frame_equal(result, expected)
+
+class CheckIndexing(object):
+
+
+ def test_getitem(self):
+ self.assertRaises(Exception, self.panel4d.__getitem__, 'ItemQ')
+
+ def test_delitem_and_pop(self):
+ expected = self.panel4d['l2']
+ result = self.panel4d.pop('l2')
+ assert_panel_equal(expected, result)
+ self.assert_('l2' not in self.panel4d.labels)
+
+ del self.panel4d['l3']
+ self.assert_('l3' not in self.panel4d.labels)
+ self.assertRaises(Exception, self.panel4d.__delitem__, 'l3')
+
+ values = np.empty((4, 4, 4, 4))
+ values[0] = 0
+ values[1] = 1
+ values[2] = 2
+ values[3] = 3
+
+ panel4d = Panel4D(values, range(4), range(4), range(4), range(4))
+
+ # did we delete the right row?
+
+ panel4dc = panel4d.copy()
+ del panel4dc[0]
+ assert_panel_equal(panel4dc[1], panel4d[1])
+ assert_panel_equal(panel4dc[2], panel4d[2])
+ assert_panel_equal(panel4dc[3], panel4d[3])
+
+ panel4dc = panel4d.copy()
+ del panel4dc[1]
+ assert_panel_equal(panel4dc[0], panel4d[0])
+ assert_panel_equal(panel4dc[2], panel4d[2])
+ assert_panel_equal(panel4dc[3], panel4d[3])
+
+ panel4dc = panel4d.copy()
+ del panel4dc[2]
+ assert_panel_equal(panel4dc[1], panel4d[1])
+ assert_panel_equal(panel4dc[0], panel4d[0])
+ assert_panel_equal(panel4dc[3], panel4d[3])
+
+ panel4dc = panel4d.copy()
+ del panel4dc[3]
+ assert_panel_equal(panel4dc[1], panel4d[1])
+ assert_panel_equal(panel4dc[2], panel4d[2])
+ assert_panel_equal(panel4dc[0], panel4d[0])
+
+ def test_setitem(self):
+ ## LongPanel with one item
+ #lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
+ #self.assertRaises(Exception, self.panel.__setitem__,
+ # 'ItemE', lp)
+
+ # Panel
+ p = Panel(dict(ItemA = self.panel4d['l1']['ItemA'][2:].filter(items=['A', 'B'])))
+ self.panel4d['l4'] = p
+ self.panel4d['l5'] = p
+
+ p2 = self.panel4d['l4']
+
+ assert_panel_equal(p, p2.reindex(items = p.items,
+ major_axis = p.major_axis,
+ minor_axis = p.minor_axis))
+
+ # scalar
+ self.panel4d['lG'] = 1
+ self.panel4d['lE'] = True
+ self.assert_(self.panel4d['lG'].values.dtype == np.int64)
+ self.assert_(self.panel4d['lE'].values.dtype == np.bool_)
+
+ # object dtype
+ self.panel4d['lQ'] = 'foo'
+ self.assert_(self.panel4d['lQ'].values.dtype == np.object_)
+
+ # boolean dtype
+ self.panel4d['lP'] = self.panel4d['l1'] > 0
+ self.assert_(self.panel4d['lP'].values.dtype == np.bool_)
+
+ def test_setitem_ndarray(self):
+ raise nose.SkipTest
+ # from pandas import DateRange, datetools
+
+ # timeidx = DateRange(start=datetime(2009,1,1),
+ # end=datetime(2009,12,31),
+ # offset=datetools.MonthEnd())
+ # lons_coarse = np.linspace(-177.5, 177.5, 72)
+ # lats_coarse = np.linspace(-87.5, 87.5, 36)
+ # P = Panel(items=timeidx, major_axis=lons_coarse, minor_axis=lats_coarse)
+ # data = np.random.randn(72*36).reshape((72,36))
+ # key = datetime(2009,2,28)
+ # P[key] = data#
+
+ # assert_almost_equal(P[key].values, data)
+
+ def test_major_xs(self):
+ ref = self.panel4d['l1']['ItemA']
+
+ idx = self.panel4d.major_axis[5]
+ xs = self.panel4d.major_xs(idx)
+
+ assert_series_equal(xs['l1'].T['ItemA'], ref.xs(idx))
+
+ # not contained
+ idx = self.panel4d.major_axis[0] - bday
+ self.assertRaises(Exception, self.panel4d.major_xs, idx)
+
+ def test_major_xs_mixed(self):
+ self.panel4d['l4'] = 'foo'
+ xs = self.panel4d.major_xs(self.panel4d.major_axis[0])
+ self.assert_(xs['l1']['A'].dtype == np.float64)
+ self.assert_(xs['l4']['A'].dtype == np.object_)
+
+ def test_minor_xs(self):
+ ref = self.panel4d['l1']['ItemA']
+
+ idx = self.panel4d.minor_axis[1]
+ xs = self.panel4d.minor_xs(idx)
+
+ assert_series_equal(xs['l1'].T['ItemA'], ref[idx])
+
+ # not contained
+ self.assertRaises(Exception, self.panel4d.minor_xs, 'E')
+
+ def test_minor_xs_mixed(self):
+ self.panel4d['l4'] = 'foo'
+
+ xs = self.panel4d.minor_xs('D')
+ self.assert_(xs['l1'].T['ItemA'].dtype == np.float64)
+ self.assert_(xs['l4'].T['ItemA'].dtype == np.object_)
+
+ def test_xs(self):
+ l1 = self.panel4d.xs('l1', axis=0)
+ expected = self.panel4d['l1']
+ assert_panel_equal(l1, expected)
+
+ # not view by default
+ l1.values[:] = np.nan
+ self.assert_(not np.isnan(self.panel4d['l1'].values).all())
+
+ # but can get view
+ l1_view = self.panel4d.xs('l1', axis=0, copy=False)
+ l1_view.values[:] = np.nan
+ self.assert_(np.isnan(self.panel4d['l1'].values).all())
+
+ # mixed-type
+ self.panel4d['strings'] = 'foo'
+ self.assertRaises(Exception, self.panel4d.xs, 'D', axis=2,
+ copy=False)
+
+ def test_getitem_fancy_labels(self):
+ panel4d = self.panel4d
+
+ labels = panel4d.labels[[1, 0]]
+ items = panel4d.items[[1, 0]]
+ dates = panel4d.major_axis[::2]
+ cols = ['D', 'C', 'F']
+
+ # all 4 specified
+ assert_panel4d_equal(panel4d.ix[labels, items, dates, cols],
+ panel4d.reindex(labels=labels, items=items, major=dates, minor=cols))
+
+ # 3 specified
+ assert_panel4d_equal(panel4d.ix[:, items, dates, cols],
+ panel4d.reindex(items=items, major=dates, minor=cols))
+
+ # 2 specified
+ assert_panel4d_equal(panel4d.ix[:, :, dates, cols],
+ panel4d.reindex(major=dates, minor=cols))
+
+ assert_panel4d_equal(panel4d.ix[:, items, :, cols],
+ panel4d.reindex(items=items, minor=cols))
+
+ assert_panel4d_equal(panel4d.ix[:, items, dates, :],
+ panel4d.reindex(items=items, major=dates))
+
+ # only 1
+ assert_panel4d_equal(panel4d.ix[:, items, :, :],
+ panel4d.reindex(items=items))
+
+ assert_panel4d_equal(panel4d.ix[:, :, dates, :],
+ panel4d.reindex(major=dates))
+
+ assert_panel4d_equal(panel4d.ix[:, :, :, cols],
+ panel4d.reindex(minor=cols))
+
+ def test_getitem_fancy_slice(self):
+ pass
+
+ def test_getitem_fancy_ints(self):
+ pass
+
+ def test_getitem_fancy_xs(self):
+ raise nose.SkipTest
+ #self.assertRaises(NotImplementedError, self.panel4d.major_xs)
+ #self.assertRaises(NotImplementedError, self.panel4d.minor_xs)
+
+ def test_getitem_fancy_xs_check_view(self):
+ raise nose.SkipTest
+ # item = 'ItemB'
+ # date = self.panel.major_axis[5]
+ # col = 'C'
+
+ # # make sure it's always a view
+ # NS = slice(None, None)
+
+ # # DataFrames
+ # comp = assert_frame_equal
+ # self._check_view(item, comp)
+ # self._check_view((item, NS), comp)
+ # self._check_view((item, NS, NS), comp)
+ # self._check_view((NS, date), comp)
+ # self._check_view((NS, date, NS), comp)
+ # self._check_view((NS, NS, 'C'), comp)
+
+ # # Series
+ # comp = assert_series_equal
+ # self._check_view((item, date), comp)
+ # self._check_view((item, date, NS), comp)
+ # self._check_view((item, NS, 'C'), comp)
+ # self._check_view((NS, date, 'C'), comp)#
+
+ #def _check_view(self, indexer, comp):
+ # cp = self.panel.copy()
+ # obj = cp.ix[indexer]
+ # obj.values[:] = 0
+ # self.assert_((obj.values == 0).all())
+ # comp(cp.ix[indexer].reindex_like(obj), obj)
+
+ def test_get_value(self):
+ for label in self.panel4d.labels:
+ for item in self.panel4d.items:
+ for mjr in self.panel4d.major_axis[::2]:
+ for mnr in self.panel4d.minor_axis:
+ result = self.panel4d.get_value(label, item, mjr, mnr)
+ expected = self.panel4d[label][item][mnr][mjr]
+ assert_almost_equal(result, expected)
+
+ def test_set_value(self):
+ for label in self.panel4d.labels:
+ for item in self.panel4d.items:
+ for mjr in self.panel4d.major_axis[::2]:
+ for mnr in self.panel4d.minor_axis:
+ self.panel4d.set_value(label, item, mjr, mnr, 1.)
+ assert_almost_equal(self.panel4d[label][item][mnr][mjr], 1.)
+
+ # resize
+ res = self.panel4d.set_value('l4', 'ItemE', 'foo', 'bar', 1.5)
+ self.assert_(isinstance(res, Panel4D))
+ self.assert_(res is not self.panel4d)
+ self.assertEqual(res.get_value('l4', 'ItemE', 'foo', 'bar'), 1.5)
+
+ res3 = self.panel4d.set_value('l4', 'ItemE', 'foobar', 'baz', 5)
+ self.assert_(com.is_float_dtype(res3['l4'].values))
+
+class TestPanel4d(unittest.TestCase, CheckIndexing, SafeForSparse, SafeForLongAndSparse):
+
+ @classmethod
+ def assert_panel4d_equal(cls,x, y):
+ assert_panel4d_equal(x, y)
+
+ def setUp(self):
+ self.panel4d = tm.makePanel4D()
+ add_nans(self.panel4d)
+
+ def test_constructor(self):
+ # with BlockManager
+ panel4d = Panel4D(self.panel4d._data)
+ self.assert_(panel4d._data is self.panel4d._data)
+
+ panel4d = Panel4D(self.panel4d._data, copy=True)
+ self.assert_(panel4d._data is not self.panel4d._data)
+ assert_panel4d_equal(panel4d, self.panel4d)
+
+ # strings handled prop
+ #panel4d = Panel4D([[['foo', 'foo', 'foo',],
+ # ['foo', 'foo', 'foo']]])
+ #self.assert_(wp.values.dtype == np.object_)
+
+ vals = self.panel4d.values
+
+ # no copy
+ panel4d = Panel4D(vals)
+ self.assert_(panel4d.values is vals)
+
+ # copy
+ panel4d = Panel4D(vals, copy=True)
+ self.assert_(panel4d.values is not vals)
+
+ def test_constructor_cast(self):
+ zero_filled = self.panel4d.fillna(0)
+
+ casted = Panel4D(zero_filled._data, dtype=int)
+ casted2 = Panel4D(zero_filled.values, dtype=int)
+
+ exp_values = zero_filled.values.astype(int)
+ assert_almost_equal(casted.values, exp_values)
+ assert_almost_equal(casted2.values, exp_values)
+
+ # can't cast
+ data = [[['foo', 'bar', 'baz']]]
+ self.assertRaises(ValueError, Panel, data, dtype=float)
+
+ def test_constructor_empty_panel(self):
+ empty = Panel()
+ self.assert_(len(empty.items) == 0)
+ self.assert_(len(empty.major_axis) == 0)
+ self.assert_(len(empty.minor_axis) == 0)
+
+ def test_constructor_observe_dtype(self):
+ # GH #411
+ panel = Panel(items=range(3), major_axis=range(3),
+ minor_axis=range(3), dtype='O')
+ self.assert_(panel.values.dtype == np.object_)
+
+ def test_consolidate(self):
+ self.assert_(self.panel4d._data.is_consolidated())
+
+ self.panel4d['foo'] = 1.
+ self.assert_(not self.panel4d._data.is_consolidated())
+
+ panel4d = self.panel4d.consolidate()
+ self.assert_(panel4d._data.is_consolidated())
+
+ def test_ctor_dict(self):
+ l1 = self.panel4d['l1']
+ l2 = self.panel4d['l2']
+
+ d = {'A' : l1, 'B' : l2.ix[['ItemB'],:,:] }
+ #d2 = {'A' : itema._series, 'B' : itemb[5:]._series}
+ #d3 = {'A' : DataFrame(itema._series),
+ # 'B' : DataFrame(itemb[5:]._series)}
+
+ panel4d = Panel4D(d)
+ #wp2 = Panel.from_dict(d2) # nested Dict
+ #wp3 = Panel.from_dict(d3)
+ #self.assert_(wp.major_axis.equals(self.panel.major_axis))
+ assert_panel_equal(panel4d['A'], self.panel4d['l1'])
+ assert_frame_equal(panel4d.ix['B','ItemB',:,:], self.panel4d.ix['l2',['ItemB'],:,:]['ItemB'])
+
+ # intersect
+ #wp = Panel.from_dict(d, intersect=True)
+ #self.assert_(wp.major_axis.equals(itemb.index[5:]))
+
+ # use constructor
+ #assert_panel_equal(Panel(d), Panel.from_dict(d))
+ #assert_panel_equal(Panel(d2), Panel.from_dict(d2))
+ #assert_panel_equal(Panel(d3), Panel.from_dict(d3))
+
+ # cast
+ #dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
+ # for k, v in d.iteritems())
+ #result = Panel(dcasted, dtype=int)
+ #expected = Panel(dict((k, v.astype(int))
+ # for k, v in dcasted.iteritems()))
+ #assert_panel_equal(result, expected)
+
+ def test_constructor_dict_mixed(self):
+ data = dict((k, v.values) for k, v in self.panel4d.iterkv())
+ result = Panel4D(data)
+ exp_major = Index(np.arange(len(self.panel4d.major_axis)))
+ self.assert_(result.major_axis.equals(exp_major))
+
+ result = Panel4D(data,
+ labels = self.panel4d.labels,
+ items = self.panel4d.items,
+ major_axis = self.panel4d.major_axis,
+ minor_axis = self.panel4d.minor_axis)
+ assert_panel4d_equal(result, self.panel4d)
+
+ data['l2'] = self.panel4d['l2']
+ result = Panel4D(data)
+ assert_panel4d_equal(result, self.panel4d)
+
+ # corner, blow up
+ data['l2'] = data['l2']['ItemB']
+ self.assertRaises(Exception, Panel4D, data)
+
+ data['l2'] = self.panel4d['l2'].values[:, :, :-1]
+ self.assertRaises(Exception, Panel4D, data)
+
+ def test_constructor_resize(self):
+ data = self.panel4d._data
+ labels= self.panel4d.labels[:-1]
+ items = self.panel4d.items[:-1]
+ major = self.panel4d.major_axis[:-1]
+ minor = self.panel4d.minor_axis[:-1]
+
+ result = Panel4D(data, labels=labels, items=items, major_axis=major, minor_axis=minor)
+ expected = self.panel4d.reindex(labels=labels, items=items, major=major, minor=minor)
+ assert_panel4d_equal(result, expected)
+
+ result = Panel4D(data, items=items, major_axis=major)
+ expected = self.panel4d.reindex(items=items, major=major)
+ assert_panel4d_equal(result, expected)
+
+ result = Panel4D(data, items=items)
+ expected = self.panel4d.reindex(items=items)
+ assert_panel4d_equal(result, expected)
+
+ result = Panel4D(data, minor_axis=minor)
+ expected = self.panel4d.reindex(minor=minor)
+ assert_panel4d_equal(result, expected)
+
+ def test_from_dict_mixed_orient(self):
+ raise nose.SkipTest
+ # df = tm.makeDataFrame()
+ # df['foo'] = 'bar'
+
+ # data = {'k1' : df,
+ # 'k2' : df}
+
+ # panel = Panel.from_dict(data, orient='minor')
+
+ # self.assert_(panel['foo'].values.dtype == np.object_)
+ # self.assert_(panel['A'].values.dtype == np.float64)
+
+ def test_values(self):
+ self.assertRaises(Exception, Panel, np.random.randn(5, 5, 5),
+ range(5), range(5), range(4))
+
+ def test_conform(self):
+ p = self.panel4d['l1'].filter(items=['ItemA', 'ItemB'])
+ conformed = self.panel4d.conform(p)
+
+ assert(conformed.items.equals(self.panel4d.labels))
+ assert(conformed.major_axis.equals(self.panel4d.major_axis))
+ assert(conformed.minor_axis.equals(self.panel4d.minor_axis))
+
+ def test_reindex(self):
+ ref = self.panel4d['l2']
+
+ # labels
+ result = self.panel4d.reindex(labels=['l1','l2'])
+ assert_panel_equal(result['l2'], ref)
+
+ # items
+ result = self.panel4d.reindex(items=['ItemA', 'ItemB'])
+ assert_frame_equal(result['l2']['ItemB'], ref['ItemB'])
+
+ # major
+ new_major = list(self.panel4d.major_axis[:10])
+ result = self.panel4d.reindex(major=new_major)
+ assert_frame_equal(result['l2']['ItemB'], ref['ItemB'].reindex(index=new_major))
+
+ # raise exception put both major and major_axis
+ self.assertRaises(Exception, self.panel4d.reindex,
+ major_axis=new_major, major=new_major)
+
+ # minor
+ new_minor = list(self.panel4d.minor_axis[:2])
+ result = self.panel4d.reindex(minor=new_minor)
+ assert_frame_equal(result['l2']['ItemB'], ref['ItemB'].reindex(columns=new_minor))
+
+ result = self.panel4d.reindex(labels=self.panel4d.labels,
+ items =self.panel4d.items,
+ major =self.panel4d.major_axis,
+ minor =self.panel4d.minor_axis)
+
+ assert(result.labels is self.panel4d.labels)
+ assert(result.items is self.panel4d.items)
+ assert(result.major_axis is self.panel4d.major_axis)
+ assert(result.minor_axis is self.panel4d.minor_axis)
+
+ self.assertRaises(Exception, self.panel4d.reindex)
+
+ # with filling
+ smaller_major = self.panel4d.major_axis[::5]
+ smaller = self.panel4d.reindex(major=smaller_major)
+
+ larger = smaller.reindex(major=self.panel4d.major_axis,
+ method='pad')
+
+ assert_panel_equal(larger.ix[:,:,self.panel4d.major_axis[1],:],
+ smaller.ix[:,:,smaller_major[0],:])
+
+ # don't necessarily copy
+ result = self.panel4d.reindex(major=self.panel4d.major_axis, copy=False)
+ self.assert_(result is self.panel4d)
+
+ def test_reindex_like(self):
+ # reindex_like
+ smaller = self.panel4d.reindex(labels=self.panel4d.labels[:-1],
+ items =self.panel4d.items[:-1],
+ major =self.panel4d.major_axis[:-1],
+ minor =self.panel4d.minor_axis[:-1])
+ smaller_like = self.panel4d.reindex_like(smaller)
+ assert_panel4d_equal(smaller, smaller_like)
+
+ def test_take(self):
+ raise nose.SkipTest
+
+ # # axis == 0
+ # result = self.panel.take([2, 0, 1], axis=0)
+ # expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
+ # assert_panel_equal(result, expected)#
+
+ # # axis >= 1
+ # result = self.panel.take([3, 0, 1, 2], axis=2)
+ # expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
+ # assert_panel_equal(result, expected)
+
+ # self.assertRaises(Exception, self.panel.take, [3, -1, 1, 2], axis=2)
+ # self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
+
+ def test_sort_index(self):
+ import random
+
+ rlabels= list(self.panel4d.labels)
+ ritems = list(self.panel4d.items)
+ rmajor = list(self.panel4d.major_axis)
+ rminor = list(self.panel4d.minor_axis)
+ random.shuffle(rlabels)
+ random.shuffle(ritems)
+ random.shuffle(rmajor)
+ random.shuffle(rminor)
+
+ random_order = self.panel4d.reindex(labels=rlabels)
+ sorted_panel4d = random_order.sort_index(axis=0)
+ assert_panel4d_equal(sorted_panel4d, self.panel4d)
+
+ # descending
+ #random_order = self.panel.reindex(items=ritems)
+ #sorted_panel = random_order.sort_index(axis=0, ascending=False)
+ #assert_panel_equal(sorted_panel,
+ # self.panel.reindex(items=self.panel.items[::-1]))
+
+ #random_order = self.panel.reindex(major=rmajor)
+ #sorted_panel = random_order.sort_index(axis=1)
+ #assert_panel_equal(sorted_panel, self.panel)
+
+ #random_order = self.panel.reindex(minor=rminor)
+ #sorted_panel = random_order.sort_index(axis=2)
+ #assert_panel_equal(sorted_panel, self.panel)
+
+ def test_fillna(self):
+ filled = self.panel4d.fillna(0)
+ self.assert_(np.isfinite(filled.values).all())
+
+ filled = self.panel4d.fillna(method='backfill')
+ assert_panel_equal(filled['l1'],
+ self.panel4d['l1'].fillna(method='backfill'))
+
+ panel4d = self.panel4d.copy()
+ panel4d['str'] = 'foo'
+
+ filled = panel4d.fillna(method='backfill')
+ assert_panel_equal(filled['l1'],
+ panel4d['l1'].fillna(method='backfill'))
+
+ empty = self.panel4d.reindex(labels=[])
+ filled = empty.fillna(0)
+ assert_panel4d_equal(filled, empty)
+
+ def test_swapaxes(self):
+ result = self.panel4d.swapaxes('labels','items')
+ self.assert_(result.items is self.panel4d.labels)
+
+ result = self.panel4d.swapaxes('labels','minor')
+ self.assert_(result.labels is self.panel4d.minor_axis)
+
+ result = self.panel4d.swapaxes('items', 'minor')
+ self.assert_(result.items is self.panel4d.minor_axis)
+
+ result = self.panel4d.swapaxes('items', 'major')
+ self.assert_(result.items is self.panel4d.major_axis)
+
+ result = self.panel4d.swapaxes('major', 'minor')
+ self.assert_(result.major_axis is self.panel4d.minor_axis)
+
+ # this should also work
+ result = self.panel4d.swapaxes(0, 1)
+ self.assert_(result.labels is self.panel4d.items)
+
+ # this should also work
+ self.assertRaises(Exception, self.panel4d.swapaxes, 'items', 'items')
+
+ def test_to_frame(self):
+ raise nose.SkipTest
+ # # filtered
+ # filtered = self.panel.to_frame()
+ # expected = self.panel.to_frame().dropna(how='any')
+ # assert_frame_equal(filtered, expected)
+
+ # # unfiltered
+ # unfiltered = self.panel.to_frame(filter_observations=False)
+ # assert_panel_equal(unfiltered.to_panel(), self.panel)
+
+ # # names
+ # self.assertEqual(unfiltered.index.names, ['major', 'minor'])
+
+ def test_to_frame_mixed(self):
+ raise nose.SkipTest
+ # panel = self.panel.fillna(0)
+ # panel['str'] = 'foo'
+ # panel['bool'] = panel['ItemA'] > 0
+
+ # lp = panel.to_frame()
+ # wp = lp.to_panel()
+ # self.assertEqual(wp['bool'].values.dtype, np.bool_)
+ # assert_frame_equal(wp['bool'], panel['bool'])
+
+ def test_filter(self):
+ pass
+
+ def test_apply(self):
+ pass
+
+ def test_compound(self):
+ raise nose.SkipTest
+ # compounded = self.panel.compound()
+
+ # assert_series_equal(compounded['ItemA'],
+ # (1 + self.panel['ItemA']).product(0) - 1)
+
+ def test_shift(self):
+ raise nose.SkipTest
+ # # major
+ # idx = self.panel.major_axis[0]
+ # idx_lag = self.panel.major_axis[1]
+
+ # shifted = self.panel.shift(1)
+
+ # assert_frame_equal(self.panel.major_xs(idx),
+ # shifted.major_xs(idx_lag))
+
+ # # minor
+ # idx = self.panel.minor_axis[0]
+ # idx_lag = self.panel.minor_axis[1]
+
+ # shifted = self.panel.shift(1, axis='minor')
+
+ # assert_frame_equal(self.panel.minor_xs(idx),
+ # shifted.minor_xs(idx_lag))
+
+ # self.assertRaises(Exception, self.panel.shift, 1, axis='items')
+
+ def test_multiindex_get(self):
+ raise nose.SkipTest
+ # ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b',2)],
+ # names=['first', 'second'])
+ # wp = Panel(np.random.random((4,5,5)),
+ # items=ind,
+ # major_axis=np.arange(5),
+ # minor_axis=np.arange(5))
+ # f1 = wp['a']
+ # f2 = wp.ix['a']
+ # assert_panel_equal(f1, f2)
+
+ # self.assert_((f1.items == [1, 2]).all())
+ # self.assert_((f2.items == [1, 2]).all())
+
+ # ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
+ # names=['first', 'second'])
+
+ def test_multiindex_blocks(self):
+ raise nose.SkipTest
+ # ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
+ # names=['first', 'second'])
+ # wp = Panel(self.panel._data)
+ # wp.items = ind
+ # f1 = wp['a']
+ # self.assert_((f1.items == [1, 2]).all())
+
+ # f1 = wp[('b',1)]
+ # self.assert_((f1.columns == ['A', 'B', 'C', 'D']).all())
+
+ def test_repr_empty(self):
+ empty = Panel4D()
+ repr(empty)
+
+ def test_rename(self):
+ mapper = {
+ 'l1' : 'foo',
+ 'l2' : 'bar',
+ 'l3' : 'baz'
+ }
+
+ renamed = self.panel4d.rename_axis(mapper, axis=0)
+ exp = Index(['foo', 'bar', 'baz'])
+ self.assert_(renamed.labels.equals(exp))
+
+ renamed = self.panel4d.rename_axis(str.lower, axis=3)
+ exp = Index(['a', 'b', 'c', 'd'])
+ self.assert_(renamed.minor_axis.equals(exp))
+
+ # don't copy
+ renamed_nocopy = self.panel4d.rename_axis(mapper, axis=0, copy=False)
+ renamed_nocopy['foo'] = 3.
+ self.assert_((self.panel4d['l1'].values == 3).all())
+
+ def test_get_attr(self):
+ assert_panel_equal(self.panel4d['l1'], self.panel4d.l1)
+
+ def test_group_agg(self):
+ values = np.ones((10, 2)) * np.arange(10).reshape((10, 1))
+ bounds = np.arange(5) * 2
+ f = lambda x: x.mean(axis=0)
+
+ agged = group_agg(values, bounds, f)
+
+ assert(agged[1][0] == 2.5)
+ assert(agged[2][0] == 4.5)
+
+ # test a function that doesn't aggregate
+ f2 = lambda x: np.zeros((2,2))
+ self.assertRaises(Exception, group_agg, values, bounds, f2)
+
+ def test_from_frame_level1_unsorted(self):
+ raise nose.SkipTest
+ # tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2),
+ # ('AAPL', 1), ('MSFT', 1)]
+ # midx = MultiIndex.from_tuples(tuples)
+ # df = DataFrame(np.random.rand(5,4), index=midx)
+ # p = df.to_panel()
+ # assert_frame_equal(p.minor_xs(2), df.ix[:,2].sort_index())
+
+ def test_to_excel(self):
+ raise nose.SkipTest
+ # try:
+ # import xlwt
+ # import xlrd
+ # import openpyxl
+ # except ImportError:
+ # raise nose.SkipTest
+
+ # for ext in ['xls', 'xlsx']:
+ # path = '__tmp__.' + ext
+ # self.panel.to_excel(path)
+ # reader = ExcelFile(path)
+ # for item, df in self.panel.iteritems():
+ # recdf = reader.parse(str(item),index_col=0)
+ # assert_frame_equal(df, recdf)
+ # os.remove(path)
+
+
+if __name__ == '__main__':
+ import nose
+ nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
+ exit=False)
diff --git a/pandas/tests/test_panelnd.py b/pandas/tests/test_panelnd.py
new file mode 100644
index 0000000000000..0d8a8c2023014
--- /dev/null
+++ b/pandas/tests/test_panelnd.py
@@ -0,0 +1,75 @@
+from datetime import datetime
+import os
+import operator
+import unittest
+import nose
+
+import numpy as np
+
+from pandas.core import panelnd
+from pandas.core.panel import Panel
+import pandas.core.common as com
+from pandas.util import py3compat
+
+from pandas.util.testing import (assert_panel_equal,
+ assert_panel4d_equal,
+ assert_frame_equal,
+ assert_series_equal,
+ assert_almost_equal)
+import pandas.util.testing as tm
+
+class TestPanelnd(unittest.TestCase):
+
+ def setUp(self):
+ pass
+
+ def test_4d_construction(self):
+
+ # create a 4D
+ Panel4D = panelnd.create_nd_panel_factory(
+ klass_name = 'Panel4D',
+ axis_orders = ['labels','items','major_axis','minor_axis'],
+ axis_slices = { 'items' : 'items', 'major_axis' : 'major_axis', 'minor_axis' : 'minor_axis' },
+ slicer = Panel,
+ axis_aliases = { 'major' : 'major_axis', 'minor' : 'minor_axis' },
+ stat_axis = 2)
+
+ p4d = Panel4D(dict(L1 = tm.makePanel(), L2 = tm.makePanel()))
+
+ def test_5d_construction(self):
+
+ # create a 4D
+ Panel4D = panelnd.create_nd_panel_factory(
+ klass_name = 'Panel4D',
+ axis_orders = ['labels1','items','major_axis','minor_axis'],
+ axis_slices = { 'items' : 'items', 'major_axis' : 'major_axis', 'minor_axis' : 'minor_axis' },
+ slicer = Panel,
+ axis_aliases = { 'major' : 'major_axis', 'minor' : 'minor_axis' },
+ stat_axis = 2)
+
+ p4d = Panel4D(dict(L1 = tm.makePanel(), L2 = tm.makePanel()))
+
+ # create a 5D
+ Panel5D = panelnd.create_nd_panel_factory(
+ klass_name = 'Panel5D',
+ axis_orders = [ 'cool1', 'labels1','items','major_axis','minor_axis'],
+ axis_slices = { 'labels1' : 'labels1', 'items' : 'items', 'major_axis' : 'major_axis', 'minor_axis' : 'minor_axis' },
+ slicer = Panel4D,
+ axis_aliases = { 'major' : 'major_axis', 'minor' : 'minor_axis' },
+ stat_axis = 2)
+
+ p5d = Panel5D(dict(C1 = p4d))
+
+ # slice back to 4d
+ results = p5d.ix['C1',:,:,0:3,:]
+ expected = p4d.ix[:,:,0:3,:]
+ assert_panel_equal(results['L1'], expected['L1'])
+
+ # test a transpose
+ #results = p5d.transpose(1,2,3,4,0)
+ #expected =
+
+if __name__ == '__main__':
+ import nose
+ nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
+ exit=False)
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
old mode 100644
new mode 100755
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 3ee53a8c1b5da..aa692f4844c49 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -19,6 +19,7 @@
import pandas.core.series as series
import pandas.core.frame as frame
import pandas.core.panel as panel
+import pandas.core.panel4d as panel4d
from pandas import bdate_range
from pandas.tseries.index import DatetimeIndex
@@ -29,6 +30,7 @@
Series = series.Series
DataFrame = frame.DataFrame
Panel = panel.Panel
+Panel4D = panel4d.Panel4D
N = 30
K = 4
@@ -198,6 +200,18 @@ def assert_panel_equal(left, right, check_panel_type=False):
for col in right:
assert(col in left)
+def assert_panel4d_equal(left, right):
+ assert(left.labels.equals(right.labels))
+ assert(left.items.equals(right.items))
+ assert(left.major_axis.equals(right.major_axis))
+ assert(left.minor_axis.equals(right.minor_axis))
+
+ for col, series in left.iterkv():
+ assert(col in right)
+ assert_panel_equal(series, right[col])
+
+ for col in right:
+ assert(col in left)
def assert_contains_all(iterable, dic):
for k in iterable:
@@ -316,6 +330,8 @@ def makePanel():
data = dict((c, makeTimeDataFrame()) for c in cols)
return Panel.fromDict(data)
+def makePanel4D():
+ return Panel4D(dict(l1 = makePanel(), l2 = makePanel(), l3 = makePanel()))
def add_nans(panel):
I, J, N = panel.shape
@@ -324,6 +340,10 @@ def add_nans(panel):
for j, col in enumerate(dm.columns):
dm[col][:i + j] = np.NaN
+def add_nans_panel4d(panel4d):
+ for l, label in enumerate(panel4d.labels):
+ panel = panel4d[label]
+ add_nans(panel)
class TestSubDict(dict):
def __init__(self, *args, **kwargs):
| ## Panel4D
( note - this superseeds the prior branch, FDPanel and this is 0.9rc1 compatible)
Panel4D is like a Panel object, but provides 4 dimensions
labels, items, major_axis, minor_axis
instead of using a dict of Panels to hold data, the Panel4D provides a convenient represenation in pandas space with named dimensions to allow easy axis swapping and slicing
# testing
tests/test_panel4d.py provides a similar methodology to test_panel.py
Panel4D required an overhall of many methods in panel.py and one change in core/index.py (regarding multi-indexing)
almost all methods in a Panel are extended to Panel4D (with the exception in that Panel
now allows a multi-axis on axis 0)
docstrings need to be refreshed a bit and made a bit more general
all tests that are not skipped pass (tested with 0.9rc1)
join is a work in progress
# further
panelnd.py provides a factory function for creation of generic panel-like ND structures with custom named dimensions
(this works, but not fully tested - examples are in the docstring)
| https://api.github.com/repos/pandas-dev/pandas/pulls/2242 | 2012-11-14T04:29:16Z | 2012-12-02T23:03:23Z | 2012-12-02T23:03:23Z | 2014-06-12T10:20:37Z |
BUG: Incorrect error message due to zero based levels. #2226 | diff --git a/pandas/core/index.py b/pandas/core/index.py
index 1e4d6347aaeec..9638da8f418cf 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1364,9 +1364,10 @@ def _get_level_number(self, level):
raise Exception('Level %s not found' % str(level))
elif level < 0:
level += self.nlevels
+ # Note: levels are zero-based
elif level >= self.nlevels:
raise ValueError('Index has only %d levels, not %d'
- % (self.nlevels, level))
+ % (self.nlevels, level + 1))
return level
_tuples = None
| Simple fix in error message that assume levels starting at 1 instead of 0.
| https://api.github.com/repos/pandas-dev/pandas/pulls/2231 | 2012-11-12T00:44:52Z | 2012-11-12T04:37:37Z | 2012-11-12T04:37:37Z | 2012-11-12T04:37:41Z |
use boolean indexing via getitem to trigger masking; add inplace keyword to where | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
old mode 100644
new mode 100755
index 31c1a09f409c3..c9184f148e5a9
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1775,9 +1775,8 @@ def __getitem__(self, key):
elif isinstance(self.columns, MultiIndex):
return self._getitem_multilevel(key)
elif isinstance(key, DataFrame):
- values = key.values
- if values.dtype == bool:
- return self.values[values]
+ if key.values.dtype == bool:
+ return self.where(key)
else:
raise ValueError('Cannot index using non-boolean DataFrame')
else:
@@ -1871,11 +1870,6 @@ def __setitem__(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, DataFrame):
- if not (key.index.equals(self.index) and
- key.columns.equals(self.columns)):
- raise PandasError('Can only index with like-indexed '
- 'DataFrame objects')
-
self._boolean_set(key, value)
elif isinstance(key, (np.ndarray, list)):
return self._set_item_multiple(key, value)
@@ -1884,18 +1878,13 @@ def __setitem__(self, key, value):
self._set_item(key, value)
def _boolean_set(self, key, value):
- mask = key.values
- if mask.dtype != np.bool_:
+ if key.values.dtype != np.bool_:
raise ValueError('Must pass DataFrame with boolean values only')
if self._is_mixed_type:
raise ValueError('Cannot do boolean setting on mixed-type frame')
- if isinstance(value, DataFrame):
- assert(value._indexed_same(self))
- np.putmask(self.values, mask, value.values)
- else:
- self.values[mask] = value
+ self.where(key, value, inplace=True)
def _set_item_multiple(self, keys, value):
if isinstance(value, DataFrame):
@@ -4878,7 +4867,7 @@ def combineMult(self, other):
"""
return self.mul(other, fill_value=1.)
- def where(self, cond, other):
+ def where(self, cond, other=NA, inplace=False):
"""
Return a DataFrame with the same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from other.
@@ -4893,6 +4882,9 @@ def where(self, cond, other):
-------
wh: DataFrame
"""
+ if not hasattr(cond,'shape'):
+ raise ValueError('where requires an ndarray like object for its condition')
+
if isinstance(cond, np.ndarray):
if cond.shape != self.shape:
raise ValueError('Array onditional must be same shape as self')
@@ -4905,13 +4897,17 @@ def where(self, cond, other):
if isinstance(other, DataFrame):
_, other = self.align(other, join='left', fill_value=NA)
+ if inplace:
+ np.putmask(self.values, cond, other)
+ return self
+
rs = np.where(cond, self, other)
return self._constructor(rs, self.index, self.columns)
-
+
def mask(self, cond):
"""
Returns copy of self whose values are replaced with nan if the
- corresponding entry in cond is False
+ inverted condition is True
Parameters
----------
@@ -4921,7 +4917,7 @@ def mask(self, cond):
-------
wh: DataFrame
"""
- return self.where(cond, NA)
+ return self.where(~cond, NA)
_EMPTY_SERIES = Series([])
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
old mode 100644
new mode 100755
index 0b36e8d39a00a..dcc7bcb909cd4
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -141,6 +141,12 @@ def test_getitem_boolean(self):
self.assertRaises(ValueError, self.tsframe.__getitem__, self.tsframe)
+ # test df[df >0] works
+ bif = self.tsframe[self.tsframe > 0]
+ bifw = DataFrame(np.where(self.tsframe>0,self.tsframe,np.nan),index=self.tsframe.index,columns=self.tsframe.columns)
+ self.assert_(isinstance(bif,DataFrame))
+ self.assert_(bif.shape == self.tsframe.shape)
+ assert_frame_equal(bif,bifw)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3,4))
@@ -278,7 +284,11 @@ def test_setitem_boolean(self):
values[values == 5] = 0
assert_almost_equal(df.values, values)
- self.assertRaises(Exception, df.__setitem__, df[:-1] > 0, 2)
+ # a df that needs alignment first
+ df[df[:-1]<0] = 2
+ np.putmask(values[:-1],values[:-1]<0,2)
+ assert_almost_equal(df.values, values)
+
self.assertRaises(Exception, df.__setitem__, df * 0, 2)
# index with DataFrame
@@ -5204,14 +5214,24 @@ def test_where(self):
for k, v in rs.iteritems():
assert_series_equal(v, np.where(cond[k], df[k], other5))
- assert_frame_equal(rs, df.mask(cond))
-
err1 = (df + 1).values[0:2, :]
self.assertRaises(ValueError, df.where, cond, err1)
err2 = cond.ix[:2, :].values
self.assertRaises(ValueError, df.where, err2, other1)
+ # invalid conditions
+ self.assertRaises(ValueError, df.mask, True)
+ self.assertRaises(ValueError, df.mask, 0)
+
+ def test_mask(self):
+ df = DataFrame(np.random.randn(5, 3))
+ cond = df > 0
+
+ rs = df.where(cond, np.nan)
+ assert_frame_equal(rs, df.mask(df <= 0))
+ assert_frame_equal(rs, df.mask(~cond))
+
#----------------------------------------------------------------------
# Transposing
| in core/frame.py
changed method _getitem_ to use _mask_ directly (e.g. df.mask(df > 0) is equivalent semantically to df[df>0])
this would be a small API change as before df[df >0] returned a boolean np array
added inplace keyword to _where_ method (to update the dataframe in place, default is NOT to use inplace, and return a new dataframe)
changed method _boolean_set_ to use where and inplace=True (this allows alignment of the passed values and is slightly less strict than the current method)
all tests pass (as well as an added test in boolean frame indexing)
if included in 0.9.1 would be great (sorry for the late addition)
| https://api.github.com/repos/pandas-dev/pandas/pulls/2230 | 2012-11-11T21:19:09Z | 2012-11-13T22:52:23Z | 2012-11-13T22:52:23Z | 2014-06-12T19:22:25Z |
Unicode : change df.to_string() and friends to always return unicode objects | diff --git a/pandas/core/format.py b/pandas/core/format.py
index 13e504a8e1f88..f2999c63db38e 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -36,7 +36,7 @@
string representation of NAN to use, default 'NaN'
formatters : list or dict of one-parameter functions, optional
formatter functions to apply to columns' elements by position or name,
- default None
+ default None, if the result is a string , it must be a unicode string.
float_format : one-parameter function, optional
formatter function to apply to columns' elements if they are floats
default None
@@ -62,7 +62,7 @@ class SeriesFormatter(object):
def __init__(self, series, buf=None, header=True, length=True,
na_rep='NaN', name=False, float_format=None):
self.series = series
- self.buf = buf if buf is not None else StringIO()
+ self.buf = buf if buf is not None else StringIO(u"")
self.name = name
self.na_rep = na_rep
self.length = length
@@ -112,7 +112,7 @@ def to_string(self):
series = self.series
if len(series) == 0:
- return ''
+ return u''
fmt_index, have_header = self._get_formatted_index()
fmt_values = self._get_formatted_values()
@@ -135,9 +135,7 @@ def to_string(self):
if footer:
result.append(footer)
- if py3compat.PY3:
- return unicode(u'\n'.join(result))
- return com.console_encode(u'\n'.join(result))
+ return unicode(u'\n'.join(result))
if py3compat.PY3: # pragma: no cover
_encode_diff = lambda x: 0
@@ -200,10 +198,15 @@ def __init__(self, frame, buf=None, columns=None, col_space=None,
else:
self.columns = frame.columns
- def _to_str_columns(self, force_unicode=False):
+ def _to_str_columns(self, force_unicode=None):
"""
Render a DataFrame to a list of columns (as lists of strings).
"""
+ import warnings
+ if force_unicode is not None: # pragma: no cover
+ warnings.warn("force_unicode is deprecated, it will have no effect",
+ FutureWarning)
+
# may include levels names also
str_index = self._get_formatted_index()
str_columns = self._get_formatted_column_labels()
@@ -237,32 +240,17 @@ def _to_str_columns(self, force_unicode=False):
if self.index:
strcols.insert(0, str_index)
- if not py3compat.PY3:
- if force_unicode:
- def make_unicode(x):
- if isinstance(x, unicode):
- return x
- return x.decode('utf-8')
- strcols = map(lambda col: map(make_unicode, col), strcols)
- else:
- # Generally everything is plain strings, which has ascii
- # encoding. Problem is when there is a char with value over
- # 127. Everything then gets converted to unicode.
- try:
- map(lambda col: map(str, col), strcols)
- except UnicodeError:
- def make_unicode(x):
- if isinstance(x, unicode):
- return x
- return x.decode('utf-8')
- strcols = map(lambda col: map(make_unicode, col), strcols)
-
return strcols
- def to_string(self, force_unicode=False):
+ def to_string(self, force_unicode=None):
"""
Render a DataFrame to a console-friendly tabular output.
"""
+ import warnings
+ if force_unicode is not None: # pragma: no cover
+ warnings.warn("force_unicode is deprecated, it will have no effect",
+ FutureWarning)
+
frame = self.frame
if len(frame.columns) == 0 or len(frame.index) == 0:
@@ -272,15 +260,20 @@ def to_string(self, force_unicode=False):
com.pprint_thing(frame.index)))
text = info_line
else:
- strcols = self._to_str_columns(force_unicode)
+ strcols = self._to_str_columns()
text = adjoin(1, *strcols)
self.buf.writelines(text)
- def to_latex(self, force_unicode=False, column_format=None):
+ def to_latex(self, force_unicode=None, column_format=None):
"""
Render a DataFrame to a LaTeX tabular environment output.
"""
+ import warnings
+ if force_unicode is not None: # pragma: no cover
+ warnings.warn("force_unicode is deprecated, it will have no effect",
+ FutureWarning)
+
frame = self.frame
if len(frame.columns) == 0 or len(frame.index) == 0:
@@ -289,7 +282,7 @@ def to_latex(self, force_unicode=False, column_format=None):
frame.columns, frame.index))
strcols = [[info_line]]
else:
- strcols = self._to_str_columns(force_unicode)
+ strcols = self._to_str_columns()
if column_format is None:
column_format = '|l|%s|' % '|'.join('c' for _ in strcols)
@@ -726,18 +719,10 @@ def __init__(self, values, digits=7, formatter=None, na_rep='NaN',
self.justify = justify
def get_result(self):
- if self._have_unicode():
- fmt_values = self._format_strings(use_unicode=True)
- else:
- fmt_values = self._format_strings(use_unicode=False)
-
+ fmt_values = self._format_strings()
return _make_fixed_width(fmt_values, self.justify)
- def _have_unicode(self):
- mask = lib.map_infer(self.values, lambda x: isinstance(x, unicode))
- return mask.any()
-
- def _format_strings(self, use_unicode=False):
+ def _format_strings(self):
if self.float_format is None:
float_format = print_config.float_format
if float_format is None:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f7f296e822e15..a160c994e94a9 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -612,20 +612,51 @@ def _need_info_repr_(self):
else:
return False
- def __repr__(self):
+ def __str__(self):
+ """
+ Return a string representation for a particular DataFrame
+
+ Invoked by str(df) in both py2/py3.
+ Yields Bytestring in Py2, Unicode String in py3.
+ """
+
+ if py3compat.PY3:
+ return self.__unicode__()
+ return self.__bytes__()
+
+ def __bytes__(self):
"""
Return a string representation for a particular DataFrame
+
+ Invoked by bytes(df) in py3 only.
+ Yields a bytestring in both py2/py3.
+ """
+ return com.console_encode(self.__unicode__())
+
+ def __unicode__(self):
+ """
+ Return a string representation for a particular DataFrame
+
+ Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3.
"""
- buf = StringIO()
+ buf = StringIO(u"")
if self._need_info_repr_():
self.info(buf=buf, verbose=self._verbose_info)
else:
self.to_string(buf=buf)
+
value = buf.getvalue()
+ assert type(value) == unicode
- if py3compat.PY3:
- return unicode(value)
- return com.console_encode(value)
+ return value
+
+ def __repr__(self):
+ """
+ Return a string representation for a particular DataFrame
+
+ Yields Bytestring in Py2, Unicode String in py3.
+ """
+ return str(self)
def _repr_html_(self):
"""
@@ -1379,19 +1410,21 @@ def to_excel(self, excel_writer, sheet_name='sheet1', na_rep='',
def to_string(self, buf=None, columns=None, col_space=None, colSpace=None,
header=True, index=True, na_rep='NaN', formatters=None,
float_format=None, sparsify=None, nanRep=None,
- index_names=True, justify=None, force_unicode=False):
+ index_names=True, justify=None, force_unicode=None):
"""
Render a DataFrame to a console-friendly tabular output.
"""
+ import warnings
+ if force_unicode is not None: # pragma: no cover
+ warnings.warn("force_unicode is deprecated, it will have no effect",
+ FutureWarning)
if nanRep is not None: # pragma: no cover
- import warnings
warnings.warn("nanRep is deprecated, use na_rep",
FutureWarning)
na_rep = nanRep
if colSpace is not None: # pragma: no cover
- import warnings
warnings.warn("colSpace is deprecated, use col_space",
FutureWarning)
col_space = colSpace
@@ -1404,15 +1437,10 @@ def to_string(self, buf=None, columns=None, col_space=None, colSpace=None,
justify=justify,
index_names=index_names,
header=header, index=index)
- formatter.to_string(force_unicode=force_unicode)
+ formatter.to_string()
if buf is None:
result = formatter.buf.getvalue()
- if not force_unicode:
- try:
- result = str(result)
- except ValueError:
- pass
return result
@Appender(fmt.docstring_to_string, indents=1)
diff --git a/pandas/core/index.py b/pandas/core/index.py
index b7792309f66ff..133449d79d521 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -132,12 +132,48 @@ def __array_finalize__(self, obj):
def _shallow_copy(self):
return self.view()
- def __repr__(self):
+ def __str__(self):
+ """
+ Return a string representation for a particular Index
+
+ Invoked by str(df) in both py2/py3.
+ Yields Bytestring in Py2, Unicode String in py3.
+ """
+
if py3compat.PY3:
- prepr = com.pprint_thing(self)
+ return self.__unicode__()
+ return self.__bytes__()
+
+ def __bytes__(self):
+ """
+ Return a string representation for a particular Index
+
+ Invoked by bytes(df) in py3 only.
+ Yields a bytestring in both py2/py3.
+ """
+ return com.console_encode(self.__unicode__())
+
+ def __unicode__(self):
+ """
+ Return a string representation for a particular Index
+
+ Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3.
+ """
+ if len(self) > 6 and len(self) > np.get_printoptions()['threshold']:
+ data = self[:3].tolist() + ["..."] + self[-3:].tolist()
else:
- prepr = com.pprint_thing_encoded(self)
- return 'Index(%s, dtype=%s)' % (prepr, self.dtype)
+ data = self
+
+ prepr = com.pprint_thing(data)
+ return '%s(%s, dtype=%s)' % (type(self).__name__, prepr, self.dtype)
+
+ def __repr__(self):
+ """
+ Return a string representation for a particular Index
+
+ Yields Bytestring in Py2, Unicode String in py3.
+ """
+ return str(self)
def astype(self, dtype):
return Index(self.values.astype(dtype), name=self.name,
@@ -207,15 +243,6 @@ def summary(self, name=None):
name = type(self).__name__
return '%s: %s entries%s' % (name, len(self), index_summary)
- def __str__(self):
- try:
- return np.array_repr(self.values)
- except UnicodeError:
- converted = u','.join(com.pprint_thing(x) for x in self.values)
- result = u'%s([%s], dtype=''%s'')' % (type(self).__name__, converted,
- str(self.values.dtype))
- return com.console_encode(result)
-
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
@@ -394,8 +421,8 @@ def format(self, name=False):
result = []
for dt in self:
if dt.time() != zero_time or dt.tzinfo is not None:
- return header + ['%s' % x for x in self]
- result.append('%d-%.2d-%.2d' % (dt.year, dt.month, dt.day))
+ return header + [u'%s' % x for x in self]
+ result.append(u'%d-%.2d-%.2d' % (dt.year, dt.month, dt.day))
return header + result
values = self.values
@@ -1319,7 +1346,33 @@ def _array_values(self):
def dtype(self):
return np.dtype('O')
- def __repr__(self):
+ def __str__(self):
+ """
+ Return a string representation for a particular Index
+
+ Invoked by str(df) in both py2/py3.
+ Yields Bytestring in Py2, Unicode String in py3.
+ """
+
+ if py3compat.PY3:
+ return self.__unicode__()
+ return self.__bytes__()
+
+ def __bytes__(self):
+ """
+ Return a string representation for a particular Index
+
+ Invoked by bytes(df) in py3 only.
+ Yields a bytestring in both py2/py3.
+ """
+ return com.console_encode(self.__unicode__())
+
+ def __unicode__(self):
+ """
+ Return a string representation for a particular Index
+
+ Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3.
+ """
output = 'MultiIndex\n%s'
options = np.get_printoptions()
@@ -1335,10 +1388,15 @@ def __repr__(self):
np.set_printoptions(threshold=options['threshold'])
- if py3compat.PY3:
- return output % summary
- else:
- return com.console_encode(output % summary)
+ return output % summary
+
+ def __repr__(self):
+ """
+ Return a string representation for a particular Index
+
+ Yields Bytestring in Py2, Unicode String in py3.
+ """
+ return str(self)
def __len__(self):
return len(self.labels[0])
@@ -1496,7 +1554,7 @@ def format(self, space=2, sparsify=None, adjoin=True, names=False,
formatted = lev.take(lab).format()
else:
# weird all NA case
- formatted = [str(x) for x in com.take_1d(lev.values, lab)]
+ formatted = [com.pprint_thing(x) for x in com.take_1d(lev.values, lab)]
stringified_levels.append(formatted)
result_levels = []
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 2dca8a2aef801..ae4a5d868b139 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -386,34 +386,70 @@ def __array_wrap__(self, result):
#----------------------------------------------------------------------
# Magic methods
- def __repr__(self):
+ def __str__(self):
+ """
+ Return a string representation for a particular Panel
+
+ Invoked by str(df) in both py2/py3.
+ Yields Bytestring in Py2, Unicode String in py3.
+ """
+
+ if py3compat.PY3:
+ return self.__unicode__()
+ return self.__bytes__()
+
+ def __bytes__(self):
+ """
+ Return a string representation for a particular Panel
+
+ Invoked by bytes(df) in py3 only.
+ Yields a bytestring in both py2/py3.
+ """
+ return com.console_encode(self.__unicode__())
+
+ def __unicode__(self):
+ """
+ Return a string representation for a particular Panel
+
+ Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3.
+ """
+
class_name = str(self.__class__)
I, N, K = len(self.items), len(self.major_axis), len(self.minor_axis)
- dims = 'Dimensions: %d (items) x %d (major) x %d (minor)' % (I, N, K)
+ dims = u'Dimensions: %d (items) x %d (major) x %d (minor)' % (I, N, K)
if len(self.major_axis) > 0:
- major = 'Major axis: %s to %s' % (self.major_axis[0],
+ major = u'Major axis: %s to %s' % (self.major_axis[0],
self.major_axis[-1])
else:
- major = 'Major axis: None'
+ major = u'Major axis: None'
if len(self.minor_axis) > 0:
- minor = 'Minor axis: %s to %s' % (self.minor_axis[0],
- self.minor_axis[-1])
+ minor = u'Minor axis: %s to %s' % (com.pprint_thing(self.minor_axis[0]),
+ com.pprint_thing(self.minor_axis[-1]))
else:
- minor = 'Minor axis: None'
+ minor = u'Minor axis: None'
if len(self.items) > 0:
- items = 'Items: %s to %s' % (self.items[0], self.items[-1])
+ items = u'Items: %s to %s' % (com.pprint_thing(self.items[0]),
+ com.pprint_thing(self.items[-1]))
else:
- items = 'Items: None'
+ items = u'Items: None'
- output = '%s\n%s\n%s\n%s\n%s' % (class_name, dims, items, major, minor)
+ output = u'%s\n%s\n%s\n%s\n%s' % (class_name, dims, items, major, minor)
return output
+ def __repr__(self):
+ """
+ Return a string representation for a particular Panel
+
+ Yields Bytestring in Py2, Unicode String in py3.
+ """
+ return str(self)
+
def __iter__(self):
return iter(self.items)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 3241044a63c68..dc7588847775b 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -858,8 +858,34 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False):
return df.reset_index(level=level, drop=drop)
- def __repr__(self):
- """Clean string representation of a Series"""
+
+ def __str__(self):
+ """
+ Return a string representation for a particular DataFrame
+
+ Invoked by str(df) in both py2/py3.
+ Yields Bytestring in Py2, Unicode String in py3.
+ """
+
+ if py3compat.PY3:
+ return self.__unicode__()
+ return self.__bytes__()
+
+ def __bytes__(self):
+ """
+ Return a string representation for a particular DataFrame
+
+ Invoked by bytes(df) in py3 only.
+ Yields a bytestring in both py2/py3.
+ """
+ return com.console_encode(self.__unicode__())
+
+ def __unicode__(self):
+ """
+ Return a string representation for a particular DataFrame
+
+ Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3.
+ """
width, height = get_terminal_size()
max_rows = (height if fmt.print_config.max_rows == 0
else fmt.print_config.max_rows)
@@ -870,13 +896,24 @@ def __repr__(self):
length=len(self) > 50,
name=True)
else:
- result = '%s' % ndarray.__repr__(self)
+ result = com.pprint_thing(self)
- if py3compat.PY3:
- return unicode(result)
- return com.console_encode(result)
+ assert type(result) == unicode
+ return result
+
+ def __repr__(self):
+ """
+ Return a string representation for a particular Series
+
+ Yields Bytestring in Py2, Unicode String in py3.
+ """
+ return str(self)
def _tidy_repr(self, max_vals=20):
+ """
+
+ Internal function, should always return unicode string
+ """
num = max_vals // 2
head = self[:num]._get_repr(print_header=True, length=False,
name=False)
@@ -884,11 +921,13 @@ def _tidy_repr(self, max_vals=20):
length=False,
name=False)
result = head + '\n...\n' + tail
- return '%s\n%s' % (result, self._repr_footer())
+ result = '%s\n%s' % (result, self._repr_footer())
+
+ return unicode(result)
def _repr_footer(self):
- namestr = "Name: %s, " % com.pprint_thing(self.name) if self.name is not None else ""
- return '%sLength: %d' % (namestr, len(self))
+ namestr = u"Name: %s, " % com.pprint_thing(self.name) if self.name is not None else ""
+ return u'%sLength: %d' % (namestr, len(self))
def to_string(self, buf=None, na_rep='NaN', float_format=None,
nanRep=None, length=False, name=False):
@@ -921,6 +960,9 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None,
the_repr = self._get_repr(float_format=float_format, na_rep=na_rep,
length=length, name=name)
+
+ assert type(the_repr) == unicode
+
if buf is None:
return the_repr
else:
@@ -928,13 +970,17 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None,
def _get_repr(self, name=False, print_header=False, length=True,
na_rep='NaN', float_format=None):
+ """
+
+ Internal function, should always return unicode string
+ """
+
formatter = fmt.SeriesFormatter(self, name=name, header=print_header,
length=length, na_rep=na_rep,
float_format=float_format)
- return formatter.to_string()
-
- def __str__(self):
- return repr(self)
+ result = formatter.to_string()
+ assert type(result) == unicode
+ return result
def __iter__(self):
if np.issubdtype(self.dtype, np.datetime64):
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 10bb75bfbb5b6..0b5182acb7f72 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -135,7 +135,7 @@ def test_to_string_unicode_columns(self):
df.info(buf=buf)
buf.getvalue()
- result = self.frame.to_string(force_unicode=True)
+ result = self.frame.to_string()
self.assert_(isinstance(result, unicode))
def test_to_string_unicode_two(self):
@@ -495,7 +495,6 @@ def test_to_string_int_formatting(self):
self.assert_(issubclass(df['x'].dtype.type, np.integer))
output = df.to_string()
- self.assert_(isinstance(output, str))
expected = (' x\n'
'0 -15\n'
'1 20\n'
@@ -841,16 +840,16 @@ def test_to_string(self):
def test_to_string_mixed(self):
s = Series(['foo', np.nan, -1.23, 4.56])
result = s.to_string()
- expected = ('0 foo\n'
- '1 NaN\n'
- '2 -1.23\n'
- '3 4.56')
+ expected = (u'0 foo\n'
+ u'1 NaN\n'
+ u'2 -1.23\n'
+ u'3 4.56')
self.assertEqual(result, expected)
# but don't count NAs as floats
s = Series(['foo', np.nan, 'bar', 'baz'])
result = s.to_string()
- expected = ('0 foo\n'
+ expected = (u'0 foo\n'
'1 NaN\n'
'2 bar\n'
'3 baz')
@@ -858,7 +857,7 @@ def test_to_string_mixed(self):
s = Series(['foo', 5, 'bar', 'baz'])
result = s.to_string()
- expected = ('0 foo\n'
+ expected = (u'0 foo\n'
'1 5\n'
'2 bar\n'
'3 baz')
@@ -869,7 +868,7 @@ def test_to_string_float_na_spacing(self):
s[::2] = np.nan
result = s.to_string()
- expected = ('0 NaN\n'
+ expected = (u'0 NaN\n'
'1 1.5678\n'
'2 NaN\n'
'3 -3.0000\n'
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index fea84f5a86e36..4eb1be94e0846 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -27,6 +27,7 @@
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal)
+from pandas.util import py3compat
import pandas.util.testing as tm
import pandas.lib as lib
@@ -2916,6 +2917,21 @@ def test_repr_unicode(self):
result = repr(df)
self.assertEqual(result.split('\n')[0].rstrip(), ex_top)
+ def test_unicode_string_with_unicode(self):
+ df = DataFrame({'A': [u"\u05d0"]})
+
+ if py3compat.PY3:
+ str(df)
+ else:
+ unicode(df)
+
+ def test_bytestring_with_unicode(self):
+ df = DataFrame({'A': [u"\u05d0"]})
+ if py3compat.PY3:
+ bytes(df)
+ else:
+ str(df)
+
def test_very_wide_info_repr(self):
df = DataFrame(np.random.randn(10, 20),
columns=[tm.rands(10) for _ in xrange(20)])
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index b94840d0dfd85..4a86db9d67196 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -851,6 +851,21 @@ def test_print_unicode_columns(self):
df=pd.DataFrame({u"\u05d0":[1,2,3],"\u05d1":[4,5,6],"c":[7,8,9]})
print(df.columns) # should not raise UnicodeDecodeError
+ def test_unicode_string_with_unicode(self):
+ idx = Index(range(1000))
+
+ if py3compat.PY3:
+ str(idx)
+ else:
+ unicode(idx)
+
+ def test_bytestring_with_unicode(self):
+ idx = Index(range(1000))
+ if py3compat.PY3:
+ bytes(idx)
+ else:
+ str(idx)
+
class TestMultiIndex(unittest.TestCase):
def setUp(self):
@@ -1680,6 +1695,24 @@ def test_repr_with_unicode_data(self):
index=pd.DataFrame(d).set_index(["a","b"]).index
self.assertFalse("\\u" in repr(index)) # we don't want unicode-escaped
+ def test_unicode_string_with_unicode(self):
+ d={"a":[u"\u05d0",2,3],"b":[4,5,6],"c":[7,8,9]}
+ idx=pd.DataFrame(d).set_index(["a","b"]).index
+
+ if py3compat.PY3:
+ str(idx)
+ else:
+ unicode(idx)
+
+ def test_bytestring_with_unicode(self):
+ d={"a":[u"\u05d0",2,3],"b":[4,5,6],"c":[7,8,9]}
+ idx=pd.DataFrame(d).set_index(["a","b"]).index
+
+ if py3compat.PY3:
+ bytes(idx)
+ else:
+ str(idx)
+
def test_get_combined_index():
from pandas.core.index import _get_combined_index
result = _get_combined_index([])
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index a906489e67b57..96de4784fdc99 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1043,6 +1043,11 @@ def test_repr(self):
rep_str = repr(ser)
self.assert_("Name: 0" in rep_str)
+ def test_tidy_repr(self):
+ a=Series([u"\u05d0"]*1000)
+ a.name= 'title1'
+ repr(a) # should not raise exception
+
def test_repr_bool_fails(self):
s = Series([DataFrame(np.random.randn(2,2)) for i in range(5)])
@@ -1078,6 +1083,22 @@ def test_repr_should_return_str (self):
df=Series(data,index=index1)
self.assertTrue(type(df.__repr__() == str)) # both py2 / 3
+
+ def test_unicode_string_with_unicode(self):
+ df = Series([u"\u05d0"],name=u"\u05d1")
+ if py3compat.PY3:
+ str(df)
+ else:
+ unicode(df)
+
+ def test_bytestring_with_unicode(self):
+ df = Series([u"\u05d0"],name=u"\u05d1")
+ if py3compat.PY3:
+ bytes(df)
+ else:
+ str(df)
+
+
def test_timeseries_repr_object_dtype(self):
index = Index([datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)], dtype=object)
| closes #2225
**Note**: Although all the tests pass with minor fixes, this PR has an above-average chance of
breaking things for people who have relied on broken behaviour thus far.
`df.tidy_repr` combines several strings to produce a result. when one component is unicode
and other other is a non-ascii bytestring, it tries to convert the latter back to a unicode string
using the 'ascii' codec and fails.
I suggest that `_get_repr` -> `to_string` should always return unicode, as implemented by this PR,
and that the `force_unicode` argument be deprecated everyhwere.
The `force_unicode` argument in `to_string` conflates two things:
- which codec to use to decode the string (which can only be a hopeful guess)
- whether to return a unicode() object or str() object,
The first is now no longer necessary since `pprint_thing` already resorts to the same hack
of using utf-8 (with errors='replace') as a fallback.
I believe making the latter optional is wrong, precisely because it brings about situations
like the test case above.
`to_string`, like all internal functions , should utilize unicode objects, whenever feasible.
| https://api.github.com/repos/pandas-dev/pandas/pulls/2224 | 2012-11-11T18:25:14Z | 2012-11-27T02:46:35Z | 2012-11-27T02:46:35Z | 2014-06-13T01:01:53Z |
CLN: use com._is_sequence instead of duplicating code | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 1ab2c3b7f8460..0cfb4004708fa 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -10,20 +10,9 @@
# "null slice"
_NS = slice(None, None)
-
-def _is_sequence(x):
- try:
- iter(x)
- assert(not isinstance(x, basestring))
- return True
- except Exception:
- return False
-
-
class IndexingError(Exception):
pass
-
class _NDFrameIndexer(object):
def __init__(self, obj):
@@ -149,7 +138,7 @@ def _align_series(self, indexer, ser):
if isinstance(indexer, tuple):
for i, idx in enumerate(indexer):
ax = self.obj.axes[i]
- if _is_sequence(idx) or isinstance(idx, slice):
+ if com._is_sequence(idx) or isinstance(idx, slice):
new_ix = ax[idx]
if ser.index.equals(new_ix):
return ser.values.copy()
@@ -174,7 +163,7 @@ def _align_frame(self, indexer, df):
idx, cols = None, None
for i, ix in enumerate(indexer):
ax = self.obj.axes[i]
- if _is_sequence(ix) or isinstance(ix, slice):
+ if com._is_sequence(ix) or isinstance(ix, slice):
if idx is None:
idx = ax[ix]
elif cols is None:
| https://api.github.com/repos/pandas-dev/pandas/pulls/2223 | 2012-11-11T12:46:01Z | 2012-11-12T16:23:10Z | 2012-11-12T16:23:10Z | 2012-11-12T16:23:20Z | |
fixes for #2218, #2219 | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 31c1a09f409c3..05d3713375481 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -633,7 +633,8 @@ def keys(self):
def iteritems(self):
"""Iterator over (column, series) pairs"""
- return ((k, self[k]) for k in self.columns)
+ for i, k in enumerate(self.columns):
+ yield (k,self.take([i],axis=1)[k])
def iterrows(self):
"""
@@ -836,6 +837,10 @@ def to_dict(self, outtype='dict'):
-------
result : dict like {column -> {index -> value}}
"""
+ import warnings
+ if not self.columns.is_unique:
+ warnings.warn("DataFrame columns are not unique, some "
+ "columns will be omitted.",UserWarning)
if outtype.lower().startswith('d'):
return dict((k, v.to_dict()) for k, v in self.iteritems())
elif outtype.lower().startswith('l'):
@@ -1796,13 +1801,18 @@ def _getitem_array(self, key):
indexer = self.columns.get_indexer(key)
mask = indexer == -1
if mask.any():
- raise KeyError("No column(s) named: %s" % str(key[mask]))
+ raise KeyError("No column(s) named: %s" %
+ com.pprint_thing(key[mask]))
result = self.reindex(columns=key)
if result.columns.name is None:
result.columns.name = self.columns.name
return result
else:
mask = self.columns.isin(key)
+ for k in key:
+ if k not in self.columns:
+ raise KeyError("No column(s) named: %s" %
+ com.pprint_thing(k))
return self.take(mask.nonzero()[0], axis=1)
def _slice(self, slobj, axis=0):
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 0b36e8d39a00a..01b5d6ae46fd4 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -62,6 +62,15 @@ def test_getitem(self):
self.assert_('random' not in self.frame)
self.assertRaises(Exception, self.frame.__getitem__, 'random')
+ def test_getitem_dupe_cols(self):
+ df=DataFrame([[1,2,3],[4,5,6]],columns=['a','a','b'])
+ try:
+ df[['baf']]
+ except KeyError:
+ pass
+ else:
+ self.fail("Dataframe failed to raise KeyError")
+
def test_get(self):
b = self.frame.get('B')
assert_series_equal(b, self.frame['B'])
@@ -1136,6 +1145,11 @@ def test_get_value(self):
expected = self.frame[col][idx]
assert_almost_equal(result, expected)
+ def test_iteritems(self):
+ df=DataFrame([[1,2,3],[4,5,6]],columns=['a','a','b'])
+ for k,v in df.iteritems():
+ self.assertEqual(type(v),Series)
+
def test_lookup(self):
def alt(df, rows, cols):
result = []
@@ -7449,6 +7463,7 @@ def __nonzero__(self):
self.assert_(r0.all())
self.assert_(r1.all())
+
if __name__ == '__main__':
# unittest.main()
import nose
| 4a5b75b (the fix for #2219) triggers the issue in #2220 which makes a test fail,
afiact, that's a genuine issue.
| https://api.github.com/repos/pandas-dev/pandas/pulls/2221 | 2012-11-11T12:30:20Z | 2012-11-13T23:27:47Z | 2012-11-13T23:27:47Z | 2014-07-07T19:30:39Z |
PR: unicode, mostly | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 7bbbaab49e864..46c28e8af52ac 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1140,6 +1140,9 @@ def pprint_thing(thing, _nest_lvl=0):
from pandas.core.format import print_config
if thing is None:
result = ''
+ elif (py3compat.PY3 and hasattr(thing,'__next__')) or \
+ hasattr(thing,'next'):
+ return unicode(thing)
elif (isinstance(thing, dict) and
_nest_lvl < print_config.pprint_nest_depth):
result = _pprint_dict(thing, _nest_lvl)
diff --git a/pandas/core/format.py b/pandas/core/format.py
index aae911ba807ef..4505e6153a9a3 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -268,7 +268,8 @@ def to_string(self, force_unicode=False):
if len(frame.columns) == 0 or len(frame.index) == 0:
info_line = (u'Empty %s\nColumns: %s\nIndex: %s'
% (type(self.frame).__name__,
- frame.columns, frame.index))
+ com.pprint_thing(frame.columns),
+ com.pprint_thing(frame.index)))
text = info_line
else:
strcols = self._to_str_columns(force_unicode)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5a000485d85a4..2c3bc9a31c9b6 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3915,11 +3915,12 @@ def _apply_standard(self, func, axis, ignore_failures=False):
try:
if hasattr(e, 'args'):
k = res_index[i]
- e.args = e.args + ('occurred at index %s' % str(k),)
+ e.args = e.args + ('occurred at index %s' %
+ com.pprint_thing(k),)
except (NameError, UnboundLocalError): # pragma: no cover
# no k defined yet
pass
- raise
+ raise e
if len(results) > 0 and _is_sequence(results[0]):
if not isinstance(results[0], Series):
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 1ba78c698a1b5..291502c406018 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -209,9 +209,10 @@ def __str__(self):
try:
return np.array_repr(self.values)
except UnicodeError:
- converted = u','.join(unicode(x) for x in self.values)
- return u'%s([%s], dtype=''%s'')' % (type(self).__name__, converted,
+ converted = u','.join(com.pprint_thing(x) for x in self.values)
+ result = u'%s([%s], dtype=''%s'')' % (type(self).__name__, converted,
str(self.values.dtype))
+ return com.console_encode(result)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
@@ -1320,11 +1321,15 @@ def __repr__(self):
self[-50:].values])
else:
values = self.values
- summary = np.array2string(values, max_line_width=70)
+
+ summary = com.pprint_thing(values)
np.set_printoptions(threshold=options['threshold'])
- return output % summary
+ if py3compat.PY3:
+ return output % summary
+ else:
+ return com.console_encode(output % summary)
def __len__(self):
return len(self.labels[0])
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 10a85c5592514..cd1ca8838d65d 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -49,9 +49,10 @@ def set_ref_items(self, ref_items, maybe_rename=True):
self.ref_items = ref_items
def __repr__(self):
- shape = ' x '.join([str(s) for s in self.shape])
+ shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
name = type(self).__name__
- return '%s: %s, %s, dtype %s' % (name, self.items, shape, self.dtype)
+ result = '%s: %s, %s, dtype %s' % (name, self.items, shape, self.dtype)
+ return com.console_encode(result) # repr must return byte-string
def __contains__(self, item):
return item in self.items
@@ -935,7 +936,7 @@ def _find_block(self, item):
def _check_have(self, item):
if item not in self.items:
- raise KeyError('no item named %s' % str(item))
+ raise KeyError('no item named %s' % com.pprint_thing(item))
def reindex_axis(self, new_axis, method=None, axis=0, copy=True):
new_axis = _ensure_index(new_axis)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 5c5fd1902c4cc..57799c6455fee 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -1295,6 +1295,21 @@ def test_iget_value(self):
expected = self.frame.get_value(row, col)
assert_almost_equal(result, expected)
+ def test_nested_exception(self):
+ # Ignore the strange way of triggering the problem
+ # (which may get fixed), it's just a way to trigger
+ # the issue or reraising an outer exception without
+ # a named argument
+ df=DataFrame({"a":[1,2,3],"b":[4,5,6],"c":[7,8,9]}).set_index(["a","b"])
+ l=list(df.index)
+ l[0]=["a","b"]
+ df.index=l
+
+ try:
+ print df
+ except Exception,e:
+ self.assertNotEqual(type(e),UnboundLocalError)
+
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index c1d0894f9bfef..b94840d0dfd85 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -847,6 +847,10 @@ def test_int_name_format(self):
repr(s)
repr(df)
+ def test_print_unicode_columns(self):
+ df=pd.DataFrame({u"\u05d0":[1,2,3],"\u05d1":[4,5,6],"c":[7,8,9]})
+ print(df.columns) # should not raise UnicodeDecodeError
+
class TestMultiIndex(unittest.TestCase):
def setUp(self):
@@ -1671,6 +1675,10 @@ def test_tolist(self):
exp = list(self.index.values)
self.assertEqual(result, exp)
+ def test_repr_with_unicode_data(self):
+ d={"a":[u"\u05d0",2,3],"b":[4,5,6],"c":[7,8,9]}
+ index=pd.DataFrame(d).set_index(["a","b"]).index
+ self.assertFalse("\\u" in repr(index)) # we don't want unicode-escaped
def test_get_combined_index():
from pandas.core.index import _get_combined_index
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index 57ccfff23e5de..e9c0b2ae980d6 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -408,6 +408,13 @@ def test_get_numeric_data(self):
self.assertEqual(rs.ix[0, 'bool'], not df.ix[0, 'bool'])
+ def test_missing_unicode_key(self):
+ df=DataFrame({"a":[1]})
+ try:
+ df.ix[:,u"\u05d0"] # should not raise UnicodeEncodeError
+ except KeyError:
+ pass # this is the expected exception
+
if __name__ == '__main__':
# unittest.main()
import nose
| f2db4c1 fixes the `UnboundLocalError` mentioned in #2200.
| https://api.github.com/repos/pandas-dev/pandas/pulls/2201 | 2012-11-08T23:41:48Z | 2012-11-09T17:29:02Z | 2012-11-09T17:29:02Z | 2014-06-16T22:34:21Z |
Updating help text for plot_series | diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 00724a2dc35a0..98cf676c60a4d 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -1301,7 +1301,9 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None,
Parameters
----------
label : label argument to provide to plot
- kind : {'line', 'bar'}
+ kind : {'line', 'bar', 'barh'}
+ bar : vertical bar plot
+ barh : horizontal bar plot
rot : int, default 30
Rotation for tick labels
use_index : boolean, default True
@@ -1312,9 +1314,6 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None,
matplotlib line style to use
ax : matplotlib axis object
If not passed, uses gca()
- kind : {'line', 'bar', 'barh'}
- bar : vertical bar plot
- barh : horizontal bar plot
logy : boolean, default False
For line plots, use log scaling on y axis
xticks : sequence
| description for parameter 'kind' was given twice.
| https://api.github.com/repos/pandas-dev/pandas/pulls/2195 | 2012-11-08T04:30:52Z | 2012-11-08T14:23:06Z | 2012-11-08T14:23:06Z | 2012-11-09T23:53:16Z |
BUG: start_time end_time to_timestamp bugs #2124 #2125 #1764 | diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index d7557e38c1680..763c34717abb1 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -4,7 +4,8 @@
from datetime import datetime, date
import numpy as np
-from pandas.tseries.frequencies import (get_freq_code as _gfc, to_offset,
+import pandas.tseries.offsets as offsets
+from pandas.tseries.frequencies import (get_freq_code as _gfc,
_month_numbers, FreqGroup)
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
from pandas.tseries.tools import parse_time_string
@@ -180,19 +181,21 @@ def asfreq(self, freq, how='E'):
@property
def start_time(self):
- return self.to_timestamp(how='S')
+ return self.to_timestamp('s', how='S')
@property
def end_time(self):
- return self.to_timestamp(how='E')
+ return self.to_timestamp('s', how='E')
- def to_timestamp(self, freq=None, how='S'):
+ def to_timestamp(self, freq=None, how='start'):
"""
- Return the Timestamp at the start/end of the period
+ Return the Timestamp representation of the Period at the target
+ frequency at the specified end (how) of the Period
Parameters
----------
- freq : string or DateOffset, default frequency of PeriodIndex
+ freq : string or DateOffset, default is 'D' if self.freq is week or
+ longer and 'S' otherwise
Target frequency
how: str, default 'S' (start)
'S', 'E'. Can be aliased as case insensitive
@@ -202,20 +205,16 @@ def to_timestamp(self, freq=None, how='S'):
-------
Timestamp
"""
+ how = _validate_end_alias(how)
+
if freq is None:
base, mult = _gfc(self.freq)
- how = _validate_end_alias(how)
- if how == 'S':
- base = _freq_mod.get_to_timestamp_base(base)
- freq = _freq_mod._get_freq_str(base)
- new_val = self.asfreq(freq, how)
- else:
- new_val = self
- else:
- base, mult = _gfc(freq)
- new_val = self.asfreq(freq, how)
+ freq = _freq_mod.get_to_timestamp_base(base)
+
+ base, mult = _gfc(freq)
+ val = self.asfreq(freq, how)
- dt64 = plib.period_ordinal_to_dt64(new_val.ordinal, base)
+ dt64 = plib.period_ordinal_to_dt64(val.ordinal, base)
return Timestamp(dt64)
year = _period_field_accessor('year', 0)
@@ -765,7 +764,8 @@ def to_timestamp(self, freq=None, how='start'):
Parameters
----------
- freq : string or DateOffset, default 'D'
+ freq : string or DateOffset, default 'D' for week or longer, 'S'
+ otherwise
Target frequency
how : {'s', 'e', 'start', 'end'}
@@ -773,12 +773,14 @@ def to_timestamp(self, freq=None, how='start'):
-------
DatetimeIndex
"""
+ how = _validate_end_alias(how)
+
if freq is None:
base, mult = _gfc(self.freq)
- new_data = self
- else:
- base, mult = _gfc(freq)
- new_data = self.asfreq(freq, how)
+ freq = _freq_mod.get_to_timestamp_base(base)
+
+ base, mult = _gfc(freq)
+ new_data = self.asfreq(freq, how)
new_data = plib.periodarr_to_dt64arr(new_data.values, base)
return DatetimeIndex(new_data, freq='infer', name=self.name)
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index 41dd949620fe4..9fcd8f630bcd2 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -215,12 +215,12 @@ def test_to_timestamp(self):
start_ts = p.to_timestamp(how='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
- self.assertEquals(start_ts, p.to_timestamp(how=a))
+ self.assertEquals(start_ts, p.to_timestamp('D', how=a))
end_ts = p.to_timestamp(how='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
- self.assertEquals(end_ts, p.to_timestamp(how=a))
+ self.assertEquals(end_ts, p.to_timestamp('D', how=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
@@ -231,7 +231,7 @@ def test_to_timestamp(self):
self.assertEquals(p.start_time, p.to_timestamp(how='S'))
- self.assertEquals(p.end_time, p.to_timestamp(how='E'))
+ self.assertEquals(p.end_time, p.to_timestamp('s', how='E'))
# Frequency other than daily
@@ -245,8 +245,8 @@ def test_to_timestamp(self):
expected = datetime(1985, 12, 31, 23, 59)
self.assertEquals(result, expected)
- result = p.to_timestamp('S', how='end')
- expected = datetime(1985, 12, 31, 23, 59, 59)
+ result = p.to_timestamp(how='end')
+ expected = datetime(1985, 12, 31)
self.assertEquals(result, expected)
expected = datetime(1985, 1, 1)
@@ -272,28 +272,30 @@ def test_start_time(self):
def test_end_time(self):
p = Period('2012', freq='A')
- xp = datetime(2012, 12, 31)
+ xp = datetime(2012, 12, 31, 23, 59, 59)
self.assertEquals(xp, p.end_time)
p = Period('2012', freq='Q')
- xp = datetime(2012, 3, 31)
+ xp = datetime(2012, 3, 31, 23, 59, 59)
self.assertEquals(xp, p.end_time)
p = Period('2012', freq='M')
- xp = datetime(2012, 1, 31)
+ xp = datetime(2012, 1, 31, 23, 59, 59)
self.assertEquals(xp, p.end_time)
- xp = datetime(2012, 1, 1)
- freq_lst = ['D', 'H', 'T', 'S']
- for f in freq_lst:
- p = Period('2012', freq=f)
- self.assertEquals(p.end_time, xp)
+ xp = datetime(2012, 1, 1, 23, 59, 59)
+ p = Period('2012', freq='D')
+ self.assertEquals(p.end_time, xp)
+
+ xp = datetime(2012, 1, 1, 0, 59, 59)
+ p = Period('2012', freq='H')
+ self.assertEquals(p.end_time, xp)
self.assertEquals(Period('2012', freq='B').end_time,
- datetime(2011, 12, 30))
+ datetime(2011, 12, 30, 23, 59, 59))
self.assertEquals(Period('2012', freq='W').end_time,
- datetime(2012, 1, 1))
+ datetime(2012, 1, 1, 23, 59, 59))
def test_properties_annually(self):
@@ -1200,12 +1202,12 @@ def test_to_timestamp(self):
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
- result = series.to_timestamp('D', 'end')
+ result = series.to_timestamp(how='end')
self.assert_(result.index.equals(exp_index))
self.assertEquals(result.name, 'foo')
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-DEC')
- result = series.to_timestamp('D', 'start')
+ result = series.to_timestamp(how='start')
self.assert_(result.index.equals(exp_index))
@@ -1230,6 +1232,15 @@ def _get_with_delta(delta, freq='A-DEC'):
self.assertRaises(ValueError, index.to_timestamp, '5t')
+ index = PeriodIndex(freq='H', start='1/1/2001', end='1/2/2001')
+ series = Series(1, index=index, name='foo')
+
+ exp_index = date_range('1/1/2001 00:59:59', end='1/2/2001 00:59:59',
+ freq='H')
+ result = series.to_timestamp(how='end')
+ self.assert_(result.index.equals(exp_index))
+ self.assertEquals(result.name, 'foo')
+
def test_to_timestamp_quarterly_bug(self):
years = np.arange(1960, 2000).repeat(4)
quarters = np.tile(range(1, 5), 40)
| @wesm can you review this plz?
The main change is I have to_timestamp default to second frequency now to get the first and last second of the Period. This doesn't **quite** solve #2125 since Timestamp resolution goes down to Nanos. So the question is is it appropriate to add 999999999ns to the end_time when ns doesn't exist asa period freq?
| https://api.github.com/repos/pandas-dev/pandas/pulls/2170 | 2012-11-03T20:49:25Z | 2012-11-04T21:18:12Z | 2012-11-04T21:18:12Z | 2014-07-13T09:26:33Z |
Plot color | diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index efb3252a66209..35786c242082b 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -107,7 +107,7 @@ def test_bar_linewidth(self):
self.assert_(r.get_linewidth() == 2)
@slow
- def test_1rotation(self):
+ def test_rotation(self):
df = DataFrame(np.random.randn(5, 5))
ax = df.plot(rot=30)
for l in ax.get_xticklabels():
@@ -447,6 +447,24 @@ def test_style_by_column(self):
for i, l in enumerate(ax.get_lines()[:len(markers)]):
self.assertEqual(l.get_marker(), markers[i])
+ @slow
+ def test_line_colors(self):
+ import matplotlib.pyplot as plt
+
+ custom_colors = 'rgcby'
+
+ plt.close('all')
+ df = DataFrame(np.random.randn(5, 5))
+
+ ax = df.plot(color=custom_colors)
+
+ lines = ax.get_lines()
+ for i, l in enumerate(lines):
+ xp = custom_colors[i]
+ rs = l.get_color()
+ self.assert_(xp == rs)
+
+
class TestDataFrameGroupByPlots(unittest.TestCase):
@classmethod
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 25bcff3c54545..23d3df8a9511c 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -2,6 +2,7 @@
# pylint: disable=E1101
from itertools import izip
import datetime
+import warnings
import re
import numpy as np
@@ -852,6 +853,14 @@ class LinePlot(MPLPlot):
def __init__(self, data, **kwargs):
self.mark_right = kwargs.pop('mark_right', True)
MPLPlot.__init__(self, data, **kwargs)
+ if 'color' not in self.kwds and 'colors' in self.kwds:
+ warnings.warn(("'colors' is being deprecated. Please use 'color'"
+ "instead of 'colors'"))
+ colors = self.kwds.pop('colors')
+ self.kwds['color'] = colors
+ if 'color' in self.kwds and isinstance(self.data, Series):
+ #support series.plot(color='green')
+ self.kwds['color'] = [self.kwds['color']]
def _index_freq(self):
from pandas.core.frame import DataFrame
@@ -889,14 +898,12 @@ def _use_dynamic_x(self):
def _get_colors(self):
import matplotlib.pyplot as plt
cycle = ''.join(plt.rcParams.get('axes.color_cycle', list('bgrcmyk')))
- has_colors = 'colors' in self.kwds
- colors = self.kwds.pop('colors', cycle)
- return has_colors, colors
-
- def _maybe_add_color(self, has_colors, colors, kwds, style, i):
- if (not has_colors and
- (style is None or re.match('[a-z]+', style) is None)
- and 'color' not in kwds):
+ has_colors = 'color' in self.kwds
+ colors = self.kwds.get('color', cycle)
+ return colors
+
+ def _maybe_add_color(self, colors, kwds, style, i):
+ if style is None or re.match('[a-z]+', style) is None:
kwds['color'] = colors[i % len(colors)]
def _make_plot(self):
@@ -910,13 +917,13 @@ def _make_plot(self):
x = self._get_xticks(convert_period=True)
plotf = self._get_plot_function()
- has_colors, colors = self._get_colors()
+ colors = self._get_colors()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
style = self._get_style(i, label)
kwds = self.kwds.copy()
- self._maybe_add_color(has_colors, colors, kwds, style, i)
+ self._maybe_add_color(colors, kwds, style, i)
label = com.pprint_thing(label) # .encode('utf-8')
@@ -944,7 +951,7 @@ def _make_plot(self):
def _make_ts_plot(self, data, **kwargs):
from pandas.tseries.plotting import tsplot
kwargs = kwargs.copy()
- has_colors, colors = self._get_colors()
+ colors = self._get_colors()
plotf = self._get_plot_function()
lines = []
@@ -960,7 +967,7 @@ def to_leg_label(label, i):
style = self.style or ''
label = com.pprint_thing(self.label)
kwds = kwargs.copy()
- self._maybe_add_color(has_colors, colors, kwds, style, 0)
+ self._maybe_add_color(colors, kwds, style, 0)
newlines = tsplot(data, plotf, ax=ax, label=label,
style=self.style, **kwds)
@@ -975,7 +982,7 @@ def to_leg_label(label, i):
style = self._get_style(i, col)
kwds = kwargs.copy()
- self._maybe_add_color(has_colors, colors, kwds, style, i)
+ self._maybe_add_color(colors, kwds, style, i)
newlines = tsplot(data[col], plotf, ax=ax, label=label,
style=style, **kwds)
@@ -1096,7 +1103,7 @@ def f(ax, x, y, w, start=None, **kwds):
return f
def _make_plot(self):
- colors = self.kwds.get('color', 'brgyk')
+ colors = self.kwds.pop('color', 'brgyk')
rects = []
labels = []
| Deprecate 'colors' parameter in LinePlot. Otherwise frame.plot(color='rgb') fails but frame.plot(kind='bar', color='rgb') works.
| https://api.github.com/repos/pandas-dev/pandas/pulls/2169 | 2012-11-03T15:38:56Z | 2012-11-03T16:22:45Z | 2012-11-03T16:22:45Z | 2012-11-03T16:22:45Z |
PR: adding a core.config module to hold package-wide configurables | diff --git a/pandas/__init__.py b/pandas/__init__.py
index 3760e3fbc434b..df37b44cc6a7d 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -22,6 +22,9 @@
from pandas.version import version as __version__
from pandas.info import __doc__
+# let init-time option registration happen
+import pandas.core.config_init
+
from pandas.core.api import *
from pandas.sparse.api import *
from pandas.stats.api import *
diff --git a/pandas/core/api.py b/pandas/core/api.py
index 8cf3b7f4cbda4..469f3683113ec 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -29,3 +29,6 @@
# legacy
from pandas.core.daterange import DateRange # deprecated
import pandas.core.datetools as datetools
+
+from pandas.core.config import get_option,set_option,reset_option,\
+ reset_options,describe_options
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 02223b05fc2f9..c86ee34f26d47 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -19,6 +19,8 @@
from pandas.util.py3compat import StringIO, BytesIO
+from pandas.core.config import get_option
+
# XXX: HACK for NumPy 1.5.1 to suppress warnings
try:
np.seterr(all='ignore')
@@ -1113,7 +1115,7 @@ def in_interactive_session():
# 2) If you need to send something to the console, use console_encode().
#
# console_encode() should (hopefully) choose the right encoding for you
-# based on the encoding set in fmt.print_config.encoding.
+# based on the encoding set in option "print_config.encoding"
#
# 3) if you need to write something out to file, use
# pprint_thing_encoded(encoding).
@@ -1165,16 +1167,17 @@ def pprint_thing(thing, _nest_lvl=0):
result - unicode object on py2, str on py3. Always Unicode.
"""
- from pandas.core.format import print_config
+
if thing is None:
result = ''
elif (py3compat.PY3 and hasattr(thing,'__next__')) or \
hasattr(thing,'next'):
return unicode(thing)
elif (isinstance(thing, dict) and
- _nest_lvl < print_config.pprint_nest_depth):
+ _nest_lvl < get_option("print_config.pprint_nest_depth")):
result = _pprint_dict(thing, _nest_lvl)
- elif _is_sequence(thing) and _nest_lvl < print_config.pprint_nest_depth:
+ elif _is_sequence(thing) and _nest_lvl < \
+ get_option("print_config.pprint_nest_depth"):
result = _pprint_seq(thing, _nest_lvl)
else:
# when used internally in the package, everything
@@ -1202,7 +1205,6 @@ def pprint_thing_encoded(object, encoding='utf-8', errors='replace'):
def console_encode(object):
- from pandas.core.format import print_config
"""
this is the sanctioned way to prepare something for
sending *to the console*, it delegates to pprint_thing() to get
@@ -1210,4 +1212,5 @@ def console_encode(object):
set in print_config.encoding. Use this everywhere
where you output to the console.
"""
- return pprint_thing_encoded(object, print_config.encoding)
+ return pprint_thing_encoded(object,
+ get_option("print_config.encoding"))
diff --git a/pandas/core/config.py b/pandas/core/config.py
new file mode 100644
index 0000000000000..09c1a5f37383d
--- /dev/null
+++ b/pandas/core/config.py
@@ -0,0 +1,497 @@
+"""
+The config module holds package-wide configurables and provides
+a uniform API for working with them.
+"""
+
+"""
+Overview
+========
+
+This module supports the following requirements:
+- options are referenced using keys in dot.notation, e.g. "x.y.option - z".
+- options can be registered by modules at import time.
+- options can be registered at init-time (via core.config_init)
+- options have a default value, and (optionally) a description and
+ validation function associated with them.
+- options can be deprecated, in which case referencing them
+ should produce a warning.
+- deprecated options can optionally be rerouted to a replacement
+ so that accessing a deprecated option reroutes to a differently
+ named option.
+- options can be reset to their default value.
+- all option can be reset to their default value at once.
+- all options in a certain sub - namespace can be reset at once.
+- the user can set / get / reset or ask for the description of an option.
+- a developer can register and mark an option as deprecated.
+
+Implementation
+==============
+
+- Data is stored using nested dictionaries, and should be accessed
+ through the provided API.
+
+- "Registered options" and "Deprecated options" have metadata associcated
+ with them, which are stored in auxilary dictionaries keyed on the
+ fully-qualified key, e.g. "x.y.z.option".
+
+- the config_init module is imported by the package's __init__.py file.
+ placing any register_option() calls there will ensure those options
+ are available as soon as pandas is loaded. If you use register_option
+ in a module, it will only be available after that module is imported,
+ which you should be aware of.
+
+- `config_prefix` is a context_manager (for use with the `with` keyword)
+ which can save developers some typing, see the docstring.
+
+"""
+
+import re
+
+from collections import namedtuple
+import warnings
+
+DeprecatedOption = namedtuple("DeprecatedOption", "key msg rkey removal_ver")
+RegisteredOption = namedtuple("RegisteredOption", "key defval doc validator")
+
+__deprecated_options = {} # holds deprecated option metdata
+__registered_options = {} # holds registered option metdata
+__global_config = {} # holds the current values for registered options
+
+##########################################
+# User API
+
+
+def get_option(key):
+ """Retrieves the value of the specified option
+
+ Parameters
+ ----------
+ key - str, a fully - qualified option name , e.g. "x.y.z.option"
+
+ Returns
+ -------
+ result - the value of the option
+
+ Raises
+ ------
+ KeyError if no such option exists
+ """
+
+ _warn_if_deprecated(key)
+ key = _translate_key(key)
+
+ # walk the nested dict
+ root, k = _get_root(key)
+
+ return root[k]
+
+
+def set_option(key, value):
+ """Sets the value of the specified option
+
+ Parameters
+ ----------
+ key - str, a fully - qualified option name , e.g. "x.y.z.option"
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ KeyError if no such option exists
+ """
+ _warn_if_deprecated(key)
+ key = _translate_key(key)
+
+ o = _get_registered_option(key)
+ if o and o.validator:
+ o.validator(value)
+
+ # walk the nested dict
+ root, k = _get_root(key)
+
+ root[k] = value
+
+
+def _get_option_desription(key):
+ """Prints the description associated with the specified option
+
+ Parameters
+ ----------
+ key - str, a fully - qualified option name , e.g. "x.y.z.option"
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ KeyError if no such option exists
+ """
+ _warn_if_deprecated(key)
+ key = _translate_key(key)
+
+def describe_options(pat="",_print_desc=True):
+ """ Prints the description for one or more registered options
+
+ Parameters
+ ----------
+ pat - str, a regexp pattern. All matching keys will have their
+ description displayed.
+
+ _print_desc - if True (default) the description(s) will be printed
+ to stdout otherwise, the description(s) will be returned
+ as a unicode string (for testing).
+
+ Returns
+ -------
+ None by default, the description(s) as a unicode string if _print_desc
+ is False
+
+ """
+ s=u""
+ if pat in __registered_options.keys(): # exact key name?
+ s = _build_option_description(pat)
+ else:
+ for k in sorted(__registered_options.keys()): # filter by pat
+ if re.search(pat,k):
+ s += _build_option_description(k)
+
+ if s == u"":
+ raise KeyError("No such keys(s)")
+
+ if _print_desc:
+ print(s)
+ else:
+ return(s)
+
+def reset_option(key):
+ """ Reset a single option to it's default value """
+ set_option(key, __registered_options[key].defval)
+
+
+def reset_options(prefix=""):
+ """ Resets all registered options to their default value
+
+ Parameters
+ ----------
+ prefix - str, if specified only options matching `prefix`* will be reset
+
+ Returns
+ -------
+ None
+
+ """
+
+ for k in __registered_options.keys():
+ if k[:len(prefix)] == prefix:
+ reset_option(k)
+
+
+######################################################
+# Functions for use by pandas developers, in addition to User - api
+
+
+def register_option(key, defval, doc="", validator=None):
+ """Register an option in the package-wide pandas config object
+
+ Parameters
+ ----------
+ key - a fully-qualified key, e.g. "x.y.option - z".
+ defval - the default value of the option
+ doc - a string description of the option
+ validator - a function of a single argument, should raise `ValueError` if
+ called with a value which is not a legal value for the option.
+
+ Returns
+ -------
+ Nothing.
+
+ Raises
+ ------
+ ValueError if `validator` is specified and `defval` is not a valid value.
+
+ """
+
+
+ if key in __registered_options:
+ raise KeyError("Option '%s' has already been registered" % key)
+
+ # the default value should be legal
+ if validator:
+ validator(defval)
+
+ # walk the nested dict, creating dicts as needed along the path
+ path = key.split(".")
+ cursor = __global_config
+ for i,p in enumerate(path[:-1]):
+ if not isinstance(cursor,dict):
+ raise KeyError("Path prefix to option '%s' is already an option" %\
+ ".".join(path[:i]))
+ if not cursor.has_key(p):
+ cursor[p] = {}
+ cursor = cursor[p]
+
+ if not isinstance(cursor,dict):
+ raise KeyError("Path prefix to option '%s' is already an option" %\
+ ".".join(path[:-1]))
+
+ cursor[path[-1]] = defval # initialize
+
+ # save the option metadata
+ __registered_options[key] = RegisteredOption(key=key, defval=defval,
+ doc=doc, validator=validator)
+
+
+def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
+ """
+ Mark option `key` as deprecated, if code attempts to access this option,
+ a warning will be produced, using `msg` if given, or a default message
+ if not.
+ if `rkey` is given, any access to the key will be re-routed to `rkey`.
+
+ Neither the existence of `key` nor that if `rkey` is checked. If they
+ do not exist, any subsequence access will fail as usual, after the
+ deprecation warning is given.
+
+ Parameters
+ ----------
+ key - the name of the option to be deprecated. must be a fully-qualified
+ option name (e.g "x.y.z.rkey").
+
+ msg - (Optional) a warning message to output when the key is referenced.
+ if no message is given a default message will be emitted.
+
+ rkey - (Optional) the name of an option to reroute access to.
+ If specified, any referenced `key` will be re-routed to `rkey`
+ including set/get/reset.
+ rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
+ used by the default message if no `msg` is specified.
+
+ removal_ver - (Optional) specifies the version in which this option will
+ be removed. used by the default message if no `msg`
+ is specified.
+
+ Returns
+ -------
+ Nothing
+
+ Raises
+ ------
+ KeyError - if key has already been deprecated.
+
+ """
+ if key in __deprecated_options:
+ raise KeyError("Option '%s' has already been defined as deprecated." % key)
+
+ __deprecated_options[key] = DeprecatedOption(key, msg, rkey,removal_ver)
+
+################################
+# functions internal to the module
+
+
+def _get_root(key):
+ path = key.split(".")
+ cursor = __global_config
+ for p in path[:-1]:
+ cursor = cursor[p]
+ return cursor, path[-1]
+
+
+def _is_deprecated(key):
+ """ Returns True if the given option has been deprecated """
+ return __deprecated_options.has_key(key)
+
+
+def _get_deprecated_option(key):
+ """
+ Retrieves the metadata for a deprecated option, if `key` is deprecated.
+
+ Returns
+ -------
+ DeprecatedOption (namedtuple) if key is deprecated, None otherwise
+ """
+ try:
+ d = __deprecated_options[key]
+ except KeyError:
+ return None
+ else:
+ return d
+
+
+def _get_registered_option(key):
+ """
+ Retrieves the option metadata if `key` is a registered option.
+
+ Returns
+ -------
+ RegisteredOption (namedtuple) if key is deprecated, None otherwise
+ """
+ try:
+ d = __registered_options[key]
+ except KeyError:
+ return None
+ else:
+ return d
+
+
+def _translate_key(key):
+ """
+ if key id deprecated and a replacement key defined, will return the
+ replacement key, otherwise returns `key` as - is
+ """
+ d = _get_deprecated_option(key)
+ if d:
+ return d.rkey or key
+ else:
+ return key
+
+
+def _warn_if_deprecated(key):
+ """
+ Checks if `key` is a deprecated option and if so, prints a warning.
+
+ Returns
+ -------
+ bool - True if `key` is deprecated, False otherwise.
+ """
+ d = _get_deprecated_option(key)
+ if d:
+ if d.msg:
+ warnings.warn(d.msg, DeprecationWarning)
+ else:
+ msg = "'%s' is deprecated" % key
+ if d.removal_ver:
+ msg += " and will be removed in %s" % d.removal_ver
+ if d.rkey:
+ msg += (", please use '%s' instead." % (d.rkey))
+ else:
+ msg += (", please refrain from using it.")
+
+ warnings.warn(msg, DeprecationWarning)
+ return True
+ return False
+
+def _build_option_description(k):
+ """ Builds a formatted description of a registered option and prints it """
+
+ o = _get_registered_option(k)
+ d = _get_deprecated_option(k)
+ s = u'%s: ' %k
+ if o.doc:
+ s += "\n" +"\n ".join(o.doc.split("\n"))
+ else:
+ s += "No description available.\n"
+
+ if d:
+ s += u"\n\t(Deprecated"
+ s += u", use `%s` instead." % d.rkey if d.rkey else ""
+ s += u")\n"
+
+ s += "\n"
+ return(s)
+
+
+##############
+# helpers
+
+from contextlib import contextmanager
+
+
+@contextmanager
+def config_prefix(prefix):
+ """contextmanager for multiple invocations of API with a common prefix
+
+ supported API functions: (register / get / set )__option
+
+ Warning: This is not thread - safe, and won't work properly if you import
+ the API functions into your module using the "from x import y" construct.
+
+ Example:
+
+ import pandas.core.config as cf
+ with cf.config_prefix("display.font"):
+ cf.register_option("color", "red")
+ cf.register_option("size", " 5 pt")
+ cf.set_option(size, " 6 pt")
+ cf.get_option(size)
+ ...
+
+ etc'
+
+ will register options "display.font.color", "display.font.size", set the
+ value of "display.font.size"... and so on.
+ """
+ # Note: reset_option relies on set_option, and on key directly
+ # it does not fit in to this monkey-patching scheme
+
+ global register_option, get_option, set_option, reset_option
+
+ def wrap(func):
+ def inner(key, *args, **kwds):
+ pkey="%s.%s" % (prefix, key)
+ return func(pkey, *args, **kwds)
+ return inner
+
+ _register_option = register_option
+ _get_option = get_option
+ _set_option = set_option
+ set_option = wrap(set_option)
+ get_option = wrap(get_option)
+ register_option = wrap(register_option)
+ yield
+ set_option = _set_option
+ get_option = _get_option
+ register_option = _register_option
+
+
+# These factories and methods are handy for use as the validator
+# arg in register_option
+def is_type_factory(_type):
+ """
+
+ Parameters
+ ----------
+ `_type` - a type to be compared against (e.g. type(x) == `_type`)
+
+ Returns
+ -------
+ validator - a function of a single argument x , which returns the
+ True if type(x) is equal to `_type`
+
+ """
+ def inner(x):
+ if type(x) != _type:
+ raise ValueError("Value must have type '%s'" % str(_type))
+
+ return inner
+
+
+def is_instance_factory(_type):
+ """
+
+ Parameters
+ ----------
+ `_type` - the type to be checked against
+
+ Returns
+ -------
+ validator - a function of a single argument x , which returns the
+ True if x is an instance of `_type`
+
+ """
+ def inner(x):
+ if not isinstance(x, _type):
+ raise ValueError("Value must be an instance of '%s'" % str(_type))
+
+ return inner
+
+# common type validators, for convenience
+# usage: register_option(... , validator = is_int)
+is_int = is_type_factory(int)
+is_bool = is_type_factory(bool)
+is_float = is_type_factory(float)
+is_str = is_type_factory(str)
+is_unicode = is_type_factory(unicode)
+is_text = is_instance_factory(basestring)
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
new file mode 100644
index 0000000000000..b0279a94983d9
--- /dev/null
+++ b/pandas/core/config_init.py
@@ -0,0 +1,103 @@
+from __future__ import with_statement # support python 2.5
+
+import pandas.core.config as cf
+from pandas.core.config import is_int,is_bool,is_text,is_float
+from pandas.core.format import detect_console_encoding
+
+"""
+This module is imported from the pandas package __init__.py file
+in order to ensure that the core.config options registered here will
+be available as soon as the user loads the package. if register_option
+is invoked inside specific modules, they will not be registered until that
+module is imported, which may or may not be a problem.
+
+If you need to make sure options are available even before a certain
+module is imported, register them here rather then in the module.
+
+"""
+
+
+###########################################
+# options from the "print_config" namespace
+
+pc_precision_doc="""
+: int
+ Floating point output precision (number of significant digits). This is
+ only a suggestion
+"""
+
+pc_colspace_doc="""
+: int
+ Default space for DataFrame columns, defaults to 12
+"""
+
+pc_max_rows_doc="""
+: int
+"""
+
+pc_max_cols_doc="""
+: int
+ max_rows and max_columns are used in __repr__() methods to decide if
+ to_string() or info() is used to render an object to a string.
+ Either one, or both can be set to 0 (experimental). Pandas will figure
+ out how big the terminal is and will not display more rows or/and
+ columns that can fit on it.
+"""
+
+pc_nb_repr_h_doc="""
+: boolean
+ When True (default), IPython notebook will use html representation for
+ pandas objects (if it is available).
+"""
+
+pc_date_dayfirst_doc="""
+: boolean
+ When True, prints and parses dates with the day first, eg 20/01/2005
+"""
+
+pc_date_yearfirst_doc="""
+: boolean
+ When True, prints and parses dates with the year first, eg 2005/01/20
+"""
+
+pc_pprint_nest_depth="""
+: int
+ Defaults to 3.
+ Controls the number of nested levels to process when pretty-printing
+"""
+
+pc_multi_sparse_doc="""
+: boolean
+ Default True, "sparsify" MultiIndex display (don't display repeated
+ elements in outer levels within groups)
+"""
+
+pc_encoding_doc="""
+: str/unicode
+ Defaults to the detected encoding of the console.
+ Specifies the encoding to be used for strings returned by to_string,
+ these are generally strings meant to be displayed on the console.
+"""
+
+with cf.config_prefix('print_config'):
+ cf.register_option('precision', 7, pc_precision_doc, validator=is_int)
+ cf.register_option('digits', 7, validator=is_int)
+ cf.register_option('float_format', None)
+ cf.register_option('column_space', 12, validator=is_int)
+ cf.register_option('max_rows', 200, pc_max_rows_doc, validator=is_int)
+ cf.register_option('max_colwidth', 50, validator=is_int)
+ cf.register_option('max_columns', 0, pc_max_cols_doc, validator=is_int)
+ cf.register_option('colheader_justify', 'right',
+ validator=is_text)
+ cf.register_option('notebook_repr_html', True, pc_nb_repr_h_doc,
+ validator=is_bool)
+ cf.register_option('date_dayfirst', False, pc_date_dayfirst_doc,
+ validator=is_bool)
+ cf.register_option('date_yearfirst', False, pc_date_yearfirst_doc,
+ validator=is_bool)
+ cf.register_option('pprint_nest_depth', 3, pc_pprint_nest_depth,
+ validator=is_int)
+ cf.register_option('multi_sparse', True, pc_multi_sparse_doc,
+ validator=is_bool)
+ cf.register_option('encoding', detect_console_encoding(), pc_encoding_doc,
+ validator=is_text)
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 4230f3c19aba6..0a91be9908172 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -11,7 +11,8 @@
from pandas.core.common import adjoin, isnull, notnull
from pandas.core.index import MultiIndex, _ensure_index
from pandas.util import py3compat
-
+from pandas.core.config import get_option, set_option, \
+ reset_options
import pandas.core.common as com
import pandas.lib as lib
@@ -69,7 +70,7 @@ def __init__(self, series, buf=None, header=True, length=True,
self.header = header
if float_format is None:
- float_format = print_config.float_format
+ float_format = get_option("print_config.float_format")
self.float_format = float_format
def _get_footer(self):
@@ -145,11 +146,11 @@ def to_string(self):
_strlen = len
else:
def _encode_diff(x):
- return len(x) - len(x.decode(print_config.encoding))
+ return len(x) - len(x.decode(get_option("print_config.encoding")))
def _strlen(x):
try:
- return len(x.decode(print_config.encoding))
+ return len(x.decode(get_option("print_config.encoding")))
except UnicodeError:
return len(x)
@@ -176,7 +177,7 @@ def __init__(self, frame, buf=None, columns=None, col_space=None,
self.show_index_names = index_names
if sparsify is None:
- sparsify = print_config.multi_sparse
+ sparsify = get_option("print_config.multi_sparse")
self.sparsify = sparsify
@@ -188,7 +189,7 @@ def __init__(self, frame, buf=None, columns=None, col_space=None,
self.index = index
if justify is None:
- self.justify = print_config.colheader_justify
+ self.justify = get_option("print_config.colheader_justify")
else:
self.justify = justify
@@ -697,13 +698,13 @@ def format_array(values, formatter, float_format=None, na_rep='NaN',
fmt_klass = GenericArrayFormatter
if space is None:
- space = print_config.column_space
+ space = get_option("print_config.column_space")
if float_format is None:
- float_format = print_config.float_format
+ float_format = get_option("print_config.float_format")
if digits is None:
- digits = print_config.precision
+ digits = get_option("print_config.precision")
fmt_obj = fmt_klass(values, digits, na_rep=na_rep,
float_format=float_format,
@@ -739,9 +740,9 @@ def _have_unicode(self):
def _format_strings(self, use_unicode=False):
if self.float_format is None:
- float_format = print_config.float_format
+ float_format = get_option("print_config.float_format")
if float_format is None:
- fmt_str = '%% .%dg' % print_config.precision
+ fmt_str = '%% .%dg' % get_option("print_config.precision")
float_format = lambda x: fmt_str % x
else:
float_format = self.float_format
@@ -863,7 +864,7 @@ def _make_fixed_width(strings, justify='right', minimum=None):
if minimum is not None:
max_len = max(minimum, max_len)
- conf_max = print_config.max_colwidth
+ conf_max = get_option("print_config.max_colwidth")
if conf_max is not None and max_len > conf_max:
max_len = conf_max
@@ -941,7 +942,7 @@ def set_printoptions(precision=None, column_space=None, max_rows=None,
max_columns=None, colheader_justify=None,
max_colwidth=None, notebook_repr_html=None,
date_dayfirst=None, date_yearfirst=None,
- multi_sparse=None, encoding=None):
+ pprint_nest_depth=None,multi_sparse=None, encoding=None):
"""
Alter default behavior of DataFrame.toString
@@ -965,37 +966,65 @@ def set_printoptions(precision=None, column_space=None, max_rows=None,
When True, prints and parses dates with the day first, eg 20/01/2005
date_yearfirst : boolean
When True, prints and parses dates with the year first, eg 2005/01/20
+ pprint_nest_depth : int
+ Defaults to 3.
+ Controls the number of nested levels to process when pretty-printing
+ nested sequences.
multi_sparse : boolean
Default True, "sparsify" MultiIndex display (don't display repeated
elements in outer levels within groups)
"""
if precision is not None:
- print_config.precision = precision
+ set_option("print_config.precision", precision)
if column_space is not None:
- print_config.column_space = column_space
+ set_option("print_config.column_space", column_space)
if max_rows is not None:
- print_config.max_rows = max_rows
+ set_option("print_config.max_rows", max_rows)
if max_colwidth is not None:
- print_config.max_colwidth = max_colwidth
+ set_option("print_config.max_colwidth", max_colwidth)
if max_columns is not None:
- print_config.max_columns = max_columns
+ set_option("print_config.max_columns", max_columns)
if colheader_justify is not None:
- print_config.colheader_justify = colheader_justify
+ set_option("print_config.colheader_justify", colheader_justify)
if notebook_repr_html is not None:
- print_config.notebook_repr_html = notebook_repr_html
+ set_option("print_config.notebook_repr_html", notebook_repr_html)
if date_dayfirst is not None:
- print_config.date_dayfirst = date_dayfirst
+ set_option("print_config.date_dayfirst", date_dayfirst)
if date_yearfirst is not None:
- print_config.date_yearfirst = date_yearfirst
+ set_option("print_config.date_yearfirst", date_yearfirst)
+ if pprint_nest_depth is not None:
+ set_option("print_config.pprint_nest_depth", pprint_nest_depth)
if multi_sparse is not None:
- print_config.multi_sparse = multi_sparse
+ set_option("print_config.multi_sparse", multi_sparse)
if encoding is not None:
- print_config.encoding = encoding
-
+ set_option("print_config.encoding", encoding)
def reset_printoptions():
- print_config.reset()
+ reset_options("print_config.")
+
+def detect_console_encoding():
+ """
+ Try to find the most capable encoding supported by the console.
+ slighly modified from the way IPython handles the same issue.
+ """
+ import locale
+
+ encoding = None
+ try:
+ encoding=sys.stdin.encoding
+ except AttributeError:
+ pass
+ if not encoding or encoding =='ascii': # try again for something better
+ try:
+ encoding = locale.getpreferredencoding()
+ except Exception:
+ pass
+
+ if not encoding: # when all else fails. this will usually be "ascii"
+ encoding = sys.getdefaultencoding()
+
+ return encoding
class EngFormatter(object):
"""
@@ -1103,59 +1132,8 @@ def set_eng_float_format(precision=None, accuracy=3, use_eng_prefix=False):
"being renamed to 'accuracy'", FutureWarning)
accuracy = precision
- print_config.float_format = EngFormatter(accuracy, use_eng_prefix)
- print_config.column_space = max(12, accuracy + 9)
-
-
-class _GlobalPrintConfig(object):
- """
- Holds the console formatting settings for DataFrame and friends
- """
-
- def __init__(self):
- self.precision = self.digits = 7
- self.float_format = None
- self.column_space = 12
- self.max_rows = 200
- self.max_colwidth = 50
- self.max_columns = 0
- self.colheader_justify = 'right'
- self.notebook_repr_html = True
- self.date_dayfirst = False
- self.date_yearfirst = False
- self.pprint_nest_depth = 3
- self.multi_sparse = True
- self.encoding = self.detect_encoding()
-
- def detect_encoding(self):
- """
- Try to find the most capable encoding supported by the console.
- slighly modified from the way IPython handles the same issue.
- """
- import locale
-
- encoding = None
- try:
- encoding = sys.stdin.encoding
- except AttributeError:
- pass
-
- if not encoding or encoding == 'ascii': # try again for better
- try:
- encoding = locale.getpreferredencoding()
- except Exception:
- pass
-
- if not encoding: # when all else fails. this will usually be "ascii"
- encoding = sys.getdefaultencoding()
-
- return encoding
-
- def reset(self):
- self.__init__()
-
-print_config = _GlobalPrintConfig()
-
+ set_option("print_config.float_format", EngFormatter(accuracy, use_eng_prefix))
+ set_option("print_config.column_space", max(12, accuracy + 9))
def _put_lines(buf, lines):
if any(isinstance(x, unicode) for x in lines):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index aeed377e35fa2..df764bb36a3c0 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -44,6 +44,8 @@
import pandas.core.nanops as nanops
import pandas.lib as lib
+from pandas.core.config import get_option
+
#----------------------------------------------------------------------
# Docstring templates
@@ -579,12 +581,11 @@ def _need_info_repr_(self):
Check if it is needed to use info/summary view to represent a
particular DataFrame.
"""
- config = fmt.print_config
terminal_width, terminal_height = get_terminal_size()
- max_rows = (terminal_height if config.max_rows == 0
- else config.max_rows)
- max_columns = config.max_columns
+ max_rows = (terminal_height if get_option("print_config.max_rows") == 0
+ else get_option("print_config.max_rows"))
+ max_columns = get_option("print_config.max_columns")
if max_columns > 0:
if len(self.index) <= max_rows and \
@@ -628,7 +629,7 @@ def _repr_html_(self):
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
- if fmt.print_config.notebook_repr_html:
+ if get_option("print_config.notebook_repr_html"):
if self._need_info_repr_():
return None
else:
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 035d2531f382f..83f4d26fd7fb2 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -13,6 +13,7 @@
import pandas._algos as _algos
from pandas.lib import Timestamp
from pandas.util import py3compat
+from pandas.core.config import get_option
__all__ = ['Index']
@@ -1514,8 +1515,7 @@ def format(self, space=2, sparsify=None, adjoin=True, names=False,
result_levels.append(level)
if sparsify is None:
- import pandas.core.format as fmt
- sparsify = fmt.print_config.multi_sparse
+ sparsify = get_option("print_config.multi_sparse")
if sparsify:
# little bit of a kludge job for #1217
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 8101dace1a15f..1a3baa223f0a4 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -33,6 +33,7 @@
from pandas.util.decorators import Appender, Substitution, cache_readonly
from pandas.compat.scipy import scoreatpercentile as _quantile
+from pandas.core.config import get_option
__all__ = ['Series', 'TimeSeries']
@@ -914,8 +915,8 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False):
def __repr__(self):
"""Clean string representation of a Series"""
width, height = get_terminal_size()
- max_rows = (height if fmt.print_config.max_rows == 0
- else fmt.print_config.max_rows)
+ max_rows = (height if get_option("print_config.max_rows") == 0
+ else get_option("print_config.max_rows"))
if len(self.index) > (max_rows or 1000):
result = self._tidy_repr(min(30, max_rows - 4))
elif len(self.index) > 0:
diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py
new file mode 100644
index 0000000000000..862b0d29ffcdf
--- /dev/null
+++ b/pandas/tests/test_config.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+from __future__ import with_statement # support python 2.5
+import pandas as pd
+import unittest
+import warnings
+import nose
+
+class TestConfig(unittest.TestCase):
+
+ def __init__(self,*args):
+ super(TestConfig,self).__init__(*args)
+
+ from copy import deepcopy
+ self.cf = pd.core.config
+ self.gc=deepcopy(getattr(self.cf, '__global_config'))
+ self.do=deepcopy(getattr(self.cf, '__deprecated_options'))
+ self.ro=deepcopy(getattr(self.cf, '__registered_options'))
+
+ def setUp(self):
+ setattr(self.cf, '__global_config', {})
+ setattr(self.cf, '__deprecated_options', {})
+ setattr(self.cf, '__registered_options', {})
+
+ def tearDown(self):
+ setattr(self.cf, '__global_config',self.gc)
+ setattr(self.cf, '__deprecated_options', self.do)
+ setattr(self.cf, '__registered_options', self.ro)
+
+ def test_api(self):
+
+ #the pandas object exposes the user API
+ self.assertTrue(hasattr(pd, 'get_option'))
+ self.assertTrue(hasattr(pd, 'set_option'))
+ self.assertTrue(hasattr(pd, 'reset_option'))
+ self.assertTrue(hasattr(pd, 'reset_options'))
+ self.assertTrue(hasattr(pd, 'describe_options'))
+
+ def test_register_option(self):
+ self.cf.register_option('a', 1, 'doc')
+
+ # can't register an already registered option
+ self.assertRaises(KeyError, self.cf.register_option, 'a', 1, 'doc')
+
+ # can't register an already registered option
+ self.assertRaises(KeyError, self.cf.register_option, 'a.b.c.d1', 1,
+ 'doc')
+ self.assertRaises(KeyError, self.cf.register_option, 'a.b.c.d2', 1,
+ 'doc')
+
+ # we can register options several levels deep
+ # without predefining the intermediate steps
+ # and we can define differently named options
+ # in the same namespace
+ self.cf.register_option('k.b.c.d1', 1, 'doc')
+ self.cf.register_option('k.b.c.d2', 1, 'doc')
+
+ def test_describe_options(self):
+ self.cf.register_option('a', 1, 'doc')
+ self.cf.register_option('b', 1, 'doc2')
+ self.cf.deprecate_option('b')
+
+ self.cf.register_option('c.d.e1', 1, 'doc3')
+ self.cf.register_option('c.d.e2', 1, 'doc4')
+ self.cf.register_option('f', 1)
+ self.cf.register_option('g.h', 1)
+ self.cf.deprecate_option('g.h',rkey="blah")
+
+ # non-existent keys raise KeyError
+ self.assertRaises(KeyError, self.cf.describe_options, 'no.such.key')
+
+ # we can get the description for any key we registered
+ self.assertTrue('doc' in self.cf.describe_options('a',_print_desc=False))
+ self.assertTrue('doc2' in self.cf.describe_options('b',_print_desc=False))
+ self.assertTrue('precated' in self.cf.describe_options('b',_print_desc=False))
+
+ self.assertTrue('doc3' in self.cf.describe_options('c.d.e1',_print_desc=False))
+ self.assertTrue('doc4' in self.cf.describe_options('c.d.e2',_print_desc=False))
+
+ # if no doc is specified we get a default message
+ # saying "description not available"
+ self.assertTrue('vailable' in self.cf.describe_options('f',_print_desc=False))
+ self.assertTrue('vailable' in self.cf.describe_options('g.h',_print_desc=False))
+ self.assertTrue('precated' in self.cf.describe_options('g.h',_print_desc=False))
+ self.assertTrue('blah' in self.cf.describe_options('g.h',_print_desc=False))
+
+ def test_get_option(self):
+ self.cf.register_option('a', 1, 'doc')
+ self.cf.register_option('b.a', 'hullo', 'doc2')
+ self.cf.register_option('b.b', None, 'doc2')
+
+ # gets of existing keys succeed
+ self.assertEqual(self.cf.get_option('a'), 1)
+ self.assertEqual(self.cf.get_option('b.a'), 'hullo')
+ self.assertTrue(self.cf.get_option('b.b') is None)
+
+ # gets of non-existent keys fail
+ self.assertRaises(KeyError, self.cf.get_option, 'no_such_option')
+
+ def test_set_option(self):
+ self.cf.register_option('a', 1, 'doc')
+ self.cf.register_option('b.a', 'hullo', 'doc2')
+ self.cf.register_option('b.b', None, 'doc2')
+
+ self.assertEqual(self.cf.get_option('a'), 1)
+ self.assertEqual(self.cf.get_option('b.a'), 'hullo')
+ self.assertTrue(self.cf.get_option('b.b') is None)
+
+ self.cf.set_option('a', 2)
+ self.cf.set_option('b.a', 'wurld')
+ self.cf.set_option('b.b', 1.1)
+
+ self.assertEqual(self.cf.get_option('a'), 2)
+ self.assertEqual(self.cf.get_option('b.a'), 'wurld')
+ self.assertEqual(self.cf.get_option('b.b'), 1.1)
+
+ self.assertRaises(KeyError, self.cf.set_option, 'no.such.key', None)
+
+ def test_validation(self):
+ self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
+ self.cf.register_option('b.a', 'hullo', 'doc2',
+ validator=self.cf.is_text)
+ self.assertRaises(ValueError, self.cf.register_option, 'a.b.c.d2',
+ 'NO', 'doc', validator=self.cf.is_int)
+
+ self.cf.set_option('a', 2) # int is_int
+ self.cf.set_option('b.a', 'wurld') # str is_str
+
+ self.assertRaises(ValueError, self.cf.set_option, 'a', None) # None not is_int
+ self.assertRaises(ValueError, self.cf.set_option, 'a', 'ab')
+ self.assertRaises(ValueError, self.cf.set_option, 'b.a', 1)
+
+ def test_reset_option(self):
+ self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
+ self.cf.register_option('b.a', 'hullo', 'doc2',
+ validator=self.cf.is_str)
+ self.assertEqual(self.cf.get_option('a'), 1)
+ self.assertEqual(self.cf.get_option('b.a'), 'hullo')
+
+ self.cf.set_option('a', 2)
+ self.cf.set_option('b.a', 'wurld')
+ self.assertEqual(self.cf.get_option('a'), 2)
+ self.assertEqual(self.cf.get_option('b.a'), 'wurld')
+
+ self.cf.reset_option('a')
+ self.assertEqual(self.cf.get_option('a'), 1)
+ self.assertEqual(self.cf.get_option('b.a'), 'wurld')
+ self.cf.reset_option('b.a')
+ self.assertEqual(self.cf.get_option('a'), 1)
+ self.assertEqual(self.cf.get_option('b.a'), 'hullo')
+
+ def test_reset_options(self):
+ self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
+ self.cf.register_option('b.a', 'hullo', 'doc2',
+ validator=self.cf.is_str)
+ self.assertEqual(self.cf.get_option('a'), 1)
+ self.assertEqual(self.cf.get_option('b.a'), 'hullo')
+
+ self.cf.set_option('a', 2)
+ self.cf.set_option('b.a', 'wurld')
+ self.assertEqual(self.cf.get_option('a'), 2)
+ self.assertEqual(self.cf.get_option('b.a'), 'wurld')
+
+ self.cf.reset_options()
+ self.assertEqual(self.cf.get_option('a'), 1)
+ self.assertEqual(self.cf.get_option('b.a'), 'hullo')
+
+
+ def test_deprecate_option(self):
+ import sys
+ self.cf.deprecate_option('c') # we can deprecate non-existent options
+
+ # testing warning with catch_warning was only added in 2.6
+ if sys.version_info[:2]<(2,6):
+ raise nose.SkipTest()
+
+ self.assertTrue(self.cf._is_deprecated('c'))
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ try:
+ self.cf.get_option('c')
+ except KeyError:
+ pass
+ else:
+ self.fail("Nonexistent option didn't raise KeyError")
+
+ self.assertEqual(len(w), 1) # should have raised one warning
+ self.assertTrue('deprecated' in str(w[-1])) # we get the default message
+
+ self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
+ self.cf.register_option('b.a', 'hullo', 'doc2')
+ self.cf.register_option('c', 'hullo', 'doc2')
+
+ self.cf.deprecate_option('a', removal_ver='nifty_ver')
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ self.cf.get_option('a')
+
+ self.assertEqual(len(w), 1) # should have raised one warning
+ self.assertTrue('eprecated' in str(w[-1])) # we get the default message
+ self.assertTrue('nifty_ver' in str(w[-1])) # with the removal_ver quoted
+
+ self.assertRaises(KeyError, self.cf.deprecate_option, 'a') # can't depr. twice
+
+ self.cf.deprecate_option('b.a', 'zounds!')
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ self.cf.get_option('b.a')
+
+ self.assertEqual(len(w), 1) # should have raised one warning
+ self.assertTrue('zounds!' in str(w[-1])) # we get the custom message
+
+ # test rerouting keys
+ self.cf.register_option('d.a', 'foo', 'doc2')
+ self.cf.register_option('d.dep', 'bar', 'doc2')
+ self.assertEqual(self.cf.get_option('d.a'), 'foo')
+ self.assertEqual(self.cf.get_option('d.dep'), 'bar')
+
+ self.cf.deprecate_option('d.dep', rkey='d.a') # reroute d.dep to d.a
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ self.assertEqual(self.cf.get_option('d.dep'), 'foo')
+
+ self.assertEqual(len(w), 1) # should have raised one warning
+ self.assertTrue('eprecated' in str(w[-1])) # we get the custom message
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ self.cf.set_option('d.dep', 'baz') # should overwrite "d.a"
+
+ self.assertEqual(len(w), 1) # should have raised one warning
+ self.assertTrue('eprecated' in str(w[-1])) # we get the custom message
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ self.assertEqual(self.cf.get_option('d.dep'), 'baz')
+
+ self.assertEqual(len(w), 1) # should have raised one warning
+ self.assertTrue('eprecated' in str(w[-1])) # we get the custom message
+
+ def test_config_prefix(self):
+ with self.cf.config_prefix("base"):
+ self.cf.register_option('a',1,"doc1")
+ self.cf.register_option('b',2,"doc2")
+ self.assertEqual(self.cf.get_option('a'), 1)
+ self.assertEqual(self.cf.get_option('b'), 2)
+
+ self.cf.set_option('a',3)
+ self.cf.set_option('b',4)
+ self.assertEqual(self.cf.get_option('a'), 3)
+ self.assertEqual(self.cf.get_option('b'), 4)
+
+ self.assertEqual(self.cf.get_option('base.a'), 3)
+ self.assertEqual(self.cf.get_option('base.b'), 4)
+ self.assertTrue('doc1' in self.cf.describe_options('base.a',_print_desc=False))
+ self.assertTrue('doc2' in self.cf.describe_options('base.b',_print_desc=False))
+
+ self.cf.reset_option('base.a')
+ self.cf.reset_option('base.b')
+
+ with self.cf.config_prefix("base"):
+ self.assertEqual(self.cf.get_option('a'), 1)
+ self.assertEqual(self.cf.get_option('b'), 2)
+
+
+# fmt.reset_printoptions and fmt.set_printoptions were altered
+# to use core.config, test_format exercises those paths.
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 542e5ee964362..7238ae134252b 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -19,6 +19,7 @@
import pandas.util.testing as tm
import pandas
import pandas as pd
+from pandas.core.config import set_option,get_option
_frame = DataFrame(tm.getSeriesData())
@@ -64,7 +65,7 @@ def test_repr_tuples(self):
def test_repr_truncation(self):
max_len = 20
- fmt.print_config.max_colwidth = max_len
+ set_option("print_config.max_colwidth", max_len)
df = DataFrame({'A': np.random.randn(10),
'B': [tm.rands(np.random.randint(max_len - 1,
max_len + 1)) for i in range(10)]})
@@ -76,10 +77,10 @@ def test_repr_truncation(self):
else:
self.assert_('...' not in line)
- fmt.print_config.max_colwidth = None
+ set_option("print_config.max_colwidth", 999999)
self.assert_('...' not in repr(df))
- fmt.print_config.max_colwidth = max_len + 2
+ set_option("print_config.max_colwidth", max_len + 2)
self.assert_('...' not in repr(df))
def test_repr_should_return_str (self):
@@ -425,7 +426,7 @@ def test_to_string_float_formatting(self):
assert(df_s == expected)
fmt.reset_printoptions()
- self.assertEqual(fmt.print_config.precision, 7)
+ self.assertEqual(get_option("print_config.precision"), 7)
df = DataFrame({'x': [1e9, 0.2512]})
df_s = df.to_string()
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 9e1c451c42887..dbb75f1e749c0 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -152,7 +152,7 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
-------
datetime, datetime/dateutil.parser._result, str
"""
- from pandas.core.format import print_config
+ from pandas.core.config import get_option
from pandas.tseries.offsets import DateOffset
from pandas.tseries.frequencies import (_get_rule_month, _month_numbers,
_get_freq_str)
@@ -221,9 +221,9 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
return mresult
if dayfirst is None:
- dayfirst = print_config.date_dayfirst
+ dayfirst = get_option("print_config.date_dayfirst")
if yearfirst is None:
- yearfirst = print_config.date_yearfirst
+ yearfirst = get_option("print_config.date_yearfirst")
try:
parsed = parse(arg, dayfirst=dayfirst, yearfirst=yearfirst)
| PR for #2081
## Summary
- core.config is a new module which serves as a general mechanism for working with configurables.
- superceds fmt.print_config, while remaining backward-compatible (set_printoptions(), reset_printoptions() are still working).
- adds the following user API's: `get_option()`, `set_option`, `reset_option()`, `reset_options()` and `describe_option()` (all under the pandas top level module).
- adds the following for developer use: `register_option()` and `deprecate_option()`.
## TL;DR description
```
ENH: Add core.config module for managing package-wide configurables
The config module holds package-wide configurables and provides
a uniform API for working with them.
Overview
========
This module supports the following requirements:
- options are referenced using keys in dot.notation, e.g. "x.y.option - z".
- options can be registered by modules at import time.
- options can be registered at init-time (via core.config_init)
- options have a default value, and (optionally) a description and
validation function associated with them.
- options can be deprecated, in which case referencing them
should produce a warning.
- deprecated options can optionally be rerouted to a replacement
so that accessing a deprecated option reroutes to a differently
named option.
- options can be reset to their default value.
- all option can be reset to their default value at once.
- all options in a certain sub - namespace can be reset at once.
- the user can set / get / reset or ask for the description of an option.
- a developer can register and mark an option as deprecated.
Implementation
==============
- Data is stored using nested dictionaries, and should be accessed
through the provided API.
- "Registered options" and "Deprecated options" have metadata associcated
with them, which are stored in auxilary dictionaries keyed on the
fully-qualified key, e.g. "x.y.z.option".
- the config_init module is imported by the package's __init__.py file.
placing any register_option() calls there will ensure those options
are available as soon as pandas is loaded. If you use register_option
in a module, it will only be available after that module is imported,
which you should be aware of.
- `config_prefix` is a context_manager (for use with the `with` keyword)
which can save developers some typing, see the docstring.
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/2097 | 2012-10-21T11:28:51Z | 2012-11-27T22:19:28Z | 2012-11-27T22:19:27Z | 2014-06-13T09:21:50Z |
STY: pep8 | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index d5380b66a43f6..cb7314a26689f 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -9,6 +9,7 @@
import pandas.lib as lib
import pandas._algos as _algos
+
def match(to_match, values, na_sentinel=-1):
"""
Compute locations of to_match into values
@@ -36,6 +37,7 @@ def match(to_match, values, na_sentinel=-1):
f = lambda htype, caster: _match_generic(to_match, values, htype, caster)
return _hashtable_algo(f, values.dtype)
+
def unique(values):
"""
Compute unique values (not necessarily sorted) efficiently from input array
@@ -62,6 +64,7 @@ def count(values, uniques=None):
else:
return _hashtable_algo(f, values.dtype)
+
def _hashtable_algo(f, dtype):
"""
f(HashTable, type_caster) -> result
@@ -83,6 +86,7 @@ def _count_generic(values, table_type, type_caster):
return Series(counts, index=uniques)
+
def _match_generic(values, index, table_type, type_caster):
values = type_caster(values)
index = type_caster(index)
@@ -90,6 +94,7 @@ def _match_generic(values, index, table_type, type_caster):
table.map_locations(index)
return table.lookup(values)
+
def _unique_generic(values, table_type, type_caster):
values = type_caster(values)
table = table_type(min(len(values), 1000000))
@@ -138,6 +143,7 @@ def factorize(values, sort=False, order=None, na_sentinel=-1):
return labels, uniques, counts
+
def value_counts(values, sort=True, ascending=False):
"""
Compute a histogram of the counts of non-null values
@@ -192,6 +198,7 @@ def rank(values, axis=0, method='average', na_option='keep',
ascending=ascending)
return ranks
+
def quantile(x, q, interpolation_method='fraction'):
"""
Compute sample quantile or quantiles of the input array. For example, q=0.5
@@ -254,8 +261,8 @@ def _get_score(at):
elif interpolation_method == 'higher':
score = values[np.ceil(idx)]
else:
- raise ValueError("interpolation_method can only be 'fraction', " \
- "'lower' or 'higher'")
+ raise ValueError("interpolation_method can only be 'fraction' "
+ ", 'lower' or 'higher'")
return score
@@ -265,11 +272,12 @@ def _get_score(at):
q = np.asarray(q, np.float64)
return _algos.arrmap_float64(q, _get_score)
+
def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
- return a + (b - a)*fraction
+ return a + (b - a) * fraction
def _get_data_algo(values, func_map):
@@ -287,6 +295,7 @@ def _get_data_algo(values, func_map):
values = com._ensure_object(values)
return f, values
+
def group_position(*args):
"""
Get group position
@@ -303,19 +312,19 @@ def group_position(*args):
_rank1d_functions = {
- 'float64' : lib.rank_1d_float64,
- 'int64' : lib.rank_1d_int64,
- 'generic' : lib.rank_1d_generic
+ 'float64': lib.rank_1d_float64,
+ 'int64': lib.rank_1d_int64,
+ 'generic': lib.rank_1d_generic
}
_rank2d_functions = {
- 'float64' : lib.rank_2d_float64,
- 'int64' : lib.rank_2d_int64,
- 'generic' : lib.rank_2d_generic
+ 'float64': lib.rank_2d_float64,
+ 'int64': lib.rank_2d_int64,
+ 'generic': lib.rank_2d_generic
}
_hashtables = {
- 'float64' : lib.Float64HashTable,
- 'int64' : lib.Int64HashTable,
- 'generic' : lib.PyObjectHashTable
+ 'float64': lib.Float64HashTable,
+ 'int64': lib.Int64HashTable,
+ 'generic': lib.PyObjectHashTable
}
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 34b05d1a2c01a..1ff23bcce2a9b 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -24,6 +24,7 @@ def f(self, other):
return f
+
class Categorical(object):
"""
Represents a categorical variable in classic R / S-plus fashion
@@ -60,6 +61,7 @@ def from_array(cls, data):
name=getattr(data, 'name', None))
_levels = None
+
def _set_levels(self, levels):
from pandas.core.index import _ensure_index
@@ -95,7 +97,8 @@ def __repr__(self):
indent = ' ' * (levstring.find('[') + len(levheader) + 1)
lines = levstring.split('\n')
- levstring = '\n'.join([lines[0]] + [indent + x.lstrip() for x in lines[1:]])
+ levstring = '\n'.join([lines[0]] +
+ [indent + x.lstrip() for x in lines[1:]])
return temp % ('' if self.name is None else self.name,
repr(values), levheader + levstring)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 8e851c67176f1..c400a5e11002e 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -30,15 +30,18 @@ def next(x):
try:
np.seterr(all='ignore')
# np.set_printoptions(suppress=True)
-except Exception: # pragma: no cover
+except Exception: # pragma: no cover
pass
+
class PandasError(Exception):
pass
+
class AmbiguousIndexError(PandasError, KeyError):
pass
+
def isnull(obj):
'''
Replacement for numpy.isnan / -numpy.isfinite which is suitable
@@ -66,6 +69,7 @@ def isnull(obj):
else:
return obj is None
+
def _isnull_ndarraylike(obj):
from pandas import Series
values = np.asarray(obj)
@@ -90,6 +94,7 @@ def _isnull_ndarraylike(obj):
result = -np.isfinite(obj)
return result
+
def notnull(obj):
'''
Replacement for numpy.isfinite / -numpy.isnan which is suitable
@@ -108,6 +113,7 @@ def notnull(obj):
return not res
return -res
+
def mask_missing(arr, values_to_mask):
"""
Return a masking array of same size/shape as arr
@@ -139,6 +145,7 @@ def mask_missing(arr, values_to_mask):
return mask
+
def _pickle_array(arr):
arr = arr.view(np.ndarray)
@@ -147,10 +154,12 @@ def _pickle_array(arr):
return buf.getvalue()
+
def _unpickle_array(bytes):
arr = read_array(BytesIO(bytes))
return arr
+
def _view_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, indexer, out, fill_value=np.nan):
if na_override is not None and np.isnan(fill_value):
@@ -162,45 +171,46 @@ def wrapper(arr, indexer, out, fill_value=np.nan):
_take1d_dict = {
- 'float64' : _algos.take_1d_float64,
- 'int32' : _algos.take_1d_int32,
- 'int64' : _algos.take_1d_int64,
- 'object' : _algos.take_1d_object,
- 'bool' : _view_wrapper(_algos.take_1d_bool, np.uint8),
- 'datetime64[ns]' : _view_wrapper(_algos.take_1d_int64, np.int64,
- na_override=lib.iNaT),
+ 'float64': _algos.take_1d_float64,
+ 'int32': _algos.take_1d_int32,
+ 'int64': _algos.take_1d_int64,
+ 'object': _algos.take_1d_object,
+ 'bool': _view_wrapper(_algos.take_1d_bool, np.uint8),
+ 'datetime64[ns]': _view_wrapper(_algos.take_1d_int64, np.int64,
+ na_override=lib.iNaT),
}
_take2d_axis0_dict = {
- 'float64' : _algos.take_2d_axis0_float64,
- 'int32' : _algos.take_2d_axis0_int32,
- 'int64' : _algos.take_2d_axis0_int64,
- 'object' : _algos.take_2d_axis0_object,
- 'bool' : _view_wrapper(_algos.take_2d_axis0_bool, np.uint8),
- 'datetime64[ns]' : _view_wrapper(_algos.take_2d_axis0_int64, np.int64,
- na_override=lib.iNaT),
+ 'float64': _algos.take_2d_axis0_float64,
+ 'int32': _algos.take_2d_axis0_int32,
+ 'int64': _algos.take_2d_axis0_int64,
+ 'object': _algos.take_2d_axis0_object,
+ 'bool': _view_wrapper(_algos.take_2d_axis0_bool, np.uint8),
+ 'datetime64[ns]': _view_wrapper(_algos.take_2d_axis0_int64, np.int64,
+ na_override=lib.iNaT),
}
_take2d_axis1_dict = {
- 'float64' : _algos.take_2d_axis1_float64,
- 'int32' : _algos.take_2d_axis1_int32,
- 'int64' : _algos.take_2d_axis1_int64,
- 'object' : _algos.take_2d_axis1_object,
- 'bool' : _view_wrapper(_algos.take_2d_axis1_bool, np.uint8),
- 'datetime64[ns]' : _view_wrapper(_algos.take_2d_axis1_int64, np.int64,
+ 'float64': _algos.take_2d_axis1_float64,
+ 'int32': _algos.take_2d_axis1_int32,
+ 'int64': _algos.take_2d_axis1_int64,
+ 'object': _algos.take_2d_axis1_object,
+ 'bool': _view_wrapper(_algos.take_2d_axis1_bool, np.uint8),
+ 'datetime64[ns]': _view_wrapper(_algos.take_2d_axis1_int64, np.int64,
na_override=lib.iNaT),
}
_take2d_multi_dict = {
- 'float64' : _algos.take_2d_multi_float64,
- 'int32' : _algos.take_2d_multi_int32,
- 'int64' : _algos.take_2d_multi_int64,
- 'object' : _algos.take_2d_multi_object,
- 'bool' : _view_wrapper(_algos.take_2d_multi_bool, np.uint8),
- 'datetime64[ns]' : _view_wrapper(_algos.take_2d_multi_int64, np.int64,
- na_override=lib.iNaT),
+ 'float64': _algos.take_2d_multi_float64,
+ 'int32': _algos.take_2d_multi_int32,
+ 'int64': _algos.take_2d_multi_int64,
+ 'object': _algos.take_2d_multi_object,
+ 'bool': _view_wrapper(_algos.take_2d_multi_bool, np.uint8),
+ 'datetime64[ns]': _view_wrapper(_algos.take_2d_multi_int64, np.int64,
+ na_override=lib.iNaT),
}
+
def _get_take2d_function(dtype_str, axis=0):
if axis == 0:
return _take2d_axis0_dict[dtype_str]
@@ -208,9 +218,10 @@ def _get_take2d_function(dtype_str, axis=0):
return _take2d_axis1_dict[dtype_str]
elif axis == 'multi':
return _take2d_multi_dict[dtype_str]
- else: # pragma: no cover
+ else: # pragma: no cover
raise ValueError('bad axis: %s' % axis)
+
def take_1d(arr, indexer, out=None, fill_value=np.nan):
"""
Specialized Cython take which sets NaN values in one pass
@@ -258,6 +269,7 @@ def take_1d(arr, indexer, out=None, fill_value=np.nan):
return out
+
def take_2d_multi(arr, row_idx, col_idx, fill_value=np.nan, out=None):
dtype_str = arr.dtype.name
@@ -266,7 +278,7 @@ def take_2d_multi(arr, row_idx, col_idx, fill_value=np.nan, out=None):
if dtype_str in ('int32', 'int64', 'bool'):
row_mask = row_idx == -1
- col_mask= col_idx == -1
+ col_mask = col_idx == -1
needs_masking = row_mask.any() or col_mask.any()
if needs_masking:
@@ -348,15 +360,18 @@ def take_2d(arr, indexer, out=None, mask=None, needs_masking=None, axis=0,
fill_value=fill_value)
return result
+
def ndtake(arr, indexer, axis=0, out=None):
return arr.take(_ensure_platform_int(indexer), axis=axis, out=out)
+
def mask_out_axis(arr, mask, axis, fill_value=np.nan):
indexer = [slice(None)] * arr.ndim
indexer[axis] = mask
arr[tuple(indexer)] = fill_value
+
def take_fast(arr, indexer, mask, needs_masking, axis=0, out=None,
fill_value=np.nan):
if arr.ndim == 2:
@@ -369,6 +384,7 @@ def take_fast(arr, indexer, mask, needs_masking, axis=0, out=None,
out_passed=out is not None, fill_value=fill_value)
return result
+
def _maybe_mask(result, mask, needs_masking, axis=0, out_passed=False,
fill_value=np.nan):
if needs_masking:
@@ -380,6 +396,7 @@ def _maybe_mask(result, mask, needs_masking, axis=0, out_passed=False,
mask_out_axis(result, mask, axis, fill_value)
return result
+
def _maybe_upcast(values):
if issubclass(values.dtype.type, np.integer):
values = values.astype(float)
@@ -388,11 +405,13 @@ def _maybe_upcast(values):
return values
+
def _need_upcast(values):
if issubclass(values.dtype.type, (np.integer, np.bool_)):
return True
return False
+
def _interp_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, mask, limit=None):
view = arr.view(wrap_dtype)
@@ -401,8 +420,11 @@ def wrapper(arr, mask, limit=None):
_pad_1d_datetime = _interp_wrapper(_algos.pad_inplace_int64, np.int64)
_pad_2d_datetime = _interp_wrapper(_algos.pad_2d_inplace_int64, np.int64)
-_backfill_1d_datetime = _interp_wrapper(_algos.backfill_inplace_int64, np.int64)
-_backfill_2d_datetime = _interp_wrapper(_algos.backfill_2d_inplace_int64, np.int64)
+_backfill_1d_datetime = _interp_wrapper(_algos.backfill_inplace_int64,
+ np.int64)
+_backfill_2d_datetime = _interp_wrapper(_algos.backfill_2d_inplace_int64,
+ np.int64)
+
def pad_1d(values, limit=None, mask=None):
if is_float_dtype(values):
@@ -411,7 +433,7 @@ def pad_1d(values, limit=None, mask=None):
_method = _pad_1d_datetime
elif values.dtype == np.object_:
_method = _algos.pad_inplace_object
- else: # pragma: no cover
+ else: # pragma: no cover
raise ValueError('Invalid dtype for padding')
if mask is None:
@@ -419,6 +441,7 @@ def pad_1d(values, limit=None, mask=None):
mask = mask.view(np.uint8)
_method(values, mask, limit=limit)
+
def backfill_1d(values, limit=None, mask=None):
if is_float_dtype(values):
_method = _algos.backfill_inplace_float64
@@ -426,7 +449,7 @@ def backfill_1d(values, limit=None, mask=None):
_method = _backfill_1d_datetime
elif values.dtype == np.object_:
_method = _algos.backfill_inplace_object
- else: # pragma: no cover
+ else: # pragma: no cover
raise ValueError('Invalid dtype for padding')
if mask is None:
@@ -435,6 +458,7 @@ def backfill_1d(values, limit=None, mask=None):
_method(values, mask, limit=limit)
+
def pad_2d(values, limit=None, mask=None):
if is_float_dtype(values):
_method = _algos.pad_2d_inplace_float64
@@ -442,7 +466,7 @@ def pad_2d(values, limit=None, mask=None):
_method = _pad_2d_datetime
elif values.dtype == np.object_:
_method = _algos.pad_2d_inplace_object
- else: # pragma: no cover
+ else: # pragma: no cover
raise ValueError('Invalid dtype for padding')
if mask is None:
@@ -455,6 +479,7 @@ def pad_2d(values, limit=None, mask=None):
# for test coverage
pass
+
def backfill_2d(values, limit=None, mask=None):
if is_float_dtype(values):
_method = _algos.backfill_2d_inplace_float64
@@ -462,7 +487,7 @@ def backfill_2d(values, limit=None, mask=None):
_method = _backfill_2d_datetime
elif values.dtype == np.object_:
_method = _algos.backfill_2d_inplace_object
- else: # pragma: no cover
+ else: # pragma: no cover
raise ValueError('Invalid dtype for padding')
if mask is None:
@@ -475,6 +500,7 @@ def backfill_2d(values, limit=None, mask=None):
# for test coverage
pass
+
def _consensus_name_attr(objs):
name = objs[0].name
for obj in objs[1:]:
@@ -485,6 +511,7 @@ def _consensus_name_attr(objs):
#----------------------------------------------------------------------
# Lots of little utilities
+
def _infer_dtype(value):
if isinstance(value, (float, np.floating)):
return np.float_
@@ -495,15 +522,17 @@ def _infer_dtype(value):
else:
return np.object_
+
def _possibly_cast_item(obj, item, dtype):
chunk = obj[item]
if chunk.values.dtype != dtype:
if dtype in (np.object_, np.bool_):
obj[item] = chunk.astype(np.object_)
- elif not issubclass(dtype, (np.integer, np.bool_)): # pragma: no cover
+ elif not issubclass(dtype, (np.integer, np.bool_)): # pragma: no cover
raise ValueError("Unexpected dtype encountered: %s" % dtype)
+
def _is_bool_indexer(key):
if isinstance(key, np.ndarray) and key.dtype == np.object_:
key = np.asarray(key)
@@ -519,21 +548,24 @@ def _is_bool_indexer(key):
elif isinstance(key, list):
try:
return np.asarray(key).dtype == np.bool_
- except TypeError: # pragma: no cover
+ except TypeError: # pragma: no cover
return False
return False
+
def _default_index(n):
from pandas.core.index import Index
return Index(np.arange(n))
+
def ensure_float(arr):
if issubclass(arr.dtype.type, np.integer):
arr = arr.astype(float)
return arr
+
def _mut_exclusive(arg1, arg2):
if arg1 is not None and arg2 is not None:
raise Exception('mutually exclusive arguments')
@@ -542,18 +574,21 @@ def _mut_exclusive(arg1, arg2):
else:
return arg2
+
def _any_none(*args):
for arg in args:
if arg is None:
return True
return False
+
def _all_not_none(*args):
for arg in args:
if arg is None:
return False
return True
+
def _try_sort(iterable):
listed = list(iterable)
try:
@@ -561,17 +596,20 @@ def _try_sort(iterable):
except Exception:
return listed
+
def _count_not_none(*args):
return sum(x is not None for x in args)
#------------------------------------------------------------------------------
# miscellaneous python tools
+
def rands(n):
"""Generates a random alphanumeric string of length *n*"""
from random import Random
import string
- return ''.join(Random().sample(string.ascii_letters+string.digits, n))
+ return ''.join(Random().sample(string.ascii_letters + string.digits, n))
+
def adjoin(space, *lists):
"""
@@ -595,6 +633,7 @@ def adjoin(space, *lists):
out_lines.append(_join_unicode(lines))
return _join_unicode(out_lines, sep='\n')
+
def _join_unicode(lines, sep=''):
try:
return sep.join(lines)
@@ -603,6 +642,7 @@ def _join_unicode(lines, sep=''):
return sep.join([x.decode('utf-8') if isinstance(x, str) else x
for x in lines])
+
def iterpairs(seq):
"""
Parameters
@@ -625,10 +665,12 @@ def iterpairs(seq):
return itertools.izip(seq_it, seq_it_next)
+
def indent(string, spaces=4):
dent = ' ' * spaces
return '\n'.join([dent + x for x in string.split('\n')])
+
def banner(message):
"""
Return 80-char width message declaration with = bars on top and bottom.
@@ -636,6 +678,7 @@ def banner(message):
bar = '=' * 80
return '%s\n%s\n%s' % (bar, message, bar)
+
class groupby(dict):
"""
A simple groupby different from the one in itertools.
@@ -643,7 +686,7 @@ class groupby(dict):
Does not require the sequence elements to be sorted by keys,
however it is slower.
"""
- def __init__(self, seq, key=lambda x:x):
+ def __init__(self, seq, key=lambda x: x):
for value in seq:
k = key(value)
self.setdefault(k, []).append(value)
@@ -654,6 +697,7 @@ def __init__(self, seq, key=lambda x:x):
def __iter__(self):
return iter(dict.items(self))
+
def map_indices_py(arr):
"""
Returns a dictionary with (element, index) pairs for each element in the
@@ -661,6 +705,7 @@ def map_indices_py(arr):
"""
return dict([(x, i) for i, x in enumerate(arr)])
+
def union(*seqs):
result = set([])
for seq in seqs:
@@ -669,9 +714,11 @@ def union(*seqs):
result |= seq
return type(seqs[0])(list(result))
+
def difference(a, b):
return type(a)(list(set(a) - set(b)))
+
def intersection(*seqs):
result = set(seqs[0])
for seq in seqs:
@@ -680,6 +727,7 @@ def intersection(*seqs):
result &= seq
return type(seqs[0])(list(result))
+
def _asarray_tuplesafe(values, dtype=None):
from pandas.core.index import Index
@@ -707,6 +755,7 @@ def _asarray_tuplesafe(values, dtype=None):
return result
+
def _index_labels_to_array(labels):
if isinstance(labels, (basestring, tuple)):
labels = [labels]
@@ -714,31 +763,37 @@ def _index_labels_to_array(labels):
if not isinstance(labels, (list, np.ndarray)):
try:
labels = list(labels)
- except TypeError: # non-iterable
+ except TypeError: # non-iterable
labels = [labels]
labels = _asarray_tuplesafe(labels)
return labels
+
def _maybe_make_list(obj):
if obj is not None and not isinstance(obj, (tuple, list)):
return [obj]
return obj
+
def is_integer(obj):
return isinstance(obj, (int, long, np.integer))
+
def is_float(obj):
return isinstance(obj, (float, np.floating))
+
def is_iterator(obj):
# python 3 generators have __next__ instead of next
return hasattr(obj, 'next') or hasattr(obj, '__next__')
+
def is_number(obj):
return isinstance(obj, (np.number, int, long, float))
+
def is_integer_dtype(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
tipo = arr_or_dtype.type
@@ -747,6 +802,7 @@ def is_integer_dtype(arr_or_dtype):
return (issubclass(tipo, np.integer) and not
issubclass(tipo, np.datetime64))
+
def is_datetime64_dtype(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
tipo = arr_or_dtype.type
@@ -754,6 +810,7 @@ def is_datetime64_dtype(arr_or_dtype):
tipo = arr_or_dtype.dtype.type
return issubclass(tipo, np.datetime64)
+
def is_float_dtype(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
tipo = arr_or_dtype.type
@@ -761,9 +818,11 @@ def is_float_dtype(arr_or_dtype):
tipo = arr_or_dtype.dtype.type
return issubclass(tipo, np.floating)
+
def is_list_like(arg):
return hasattr(arg, '__iter__') and not isinstance(arg, basestring)
+
def _is_sequence(x):
try:
iter(x)
@@ -797,6 +856,7 @@ def _astype_nansafe(arr, dtype):
return arr.astype(dtype)
+
def _clean_fill_method(method):
method = method.lower()
if method == 'ffill':
@@ -804,11 +864,12 @@ def _clean_fill_method(method):
if method == 'bfill':
method = 'backfill'
if method not in ['pad', 'backfill']:
- msg = ('Invalid fill method. Expecting pad (ffill) or backfill (bfill).'
- ' Got %s' % method)
+ msg = ('Invalid fill method. Expecting pad (ffill) or backfill '
+ '(bfill). Got %s' % method)
raise ValueError(msg)
return method
+
def _all_none(*args):
for arg in args:
if arg is not None:
@@ -853,6 +914,7 @@ def load(path):
finally:
f.close()
+
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
@@ -866,6 +928,7 @@ def __iter__(self):
def next(self):
return self.reader.next().encode("utf-8")
+
def _get_handle(path, mode, encoding=None):
if py3compat.PY3: # pragma: no cover
if encoding:
@@ -916,11 +979,11 @@ def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
- self.quoting=kwds.get("quoting",None)
+ self.quoting = kwds.get("quoting", None)
def writerow(self, row):
def _check_as_is(x):
- return (self.quoting == csv.QUOTE_NONNUMERIC and \
+ return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
row = [x if _check_as_is(x)
@@ -940,6 +1003,7 @@ def _check_as_is(x):
_NS_DTYPE = np.dtype('M8[ns]')
+
def _concat_compat(to_concat, axis=0):
# filter empty arrays
to_concat = [x for x in to_concat if x.shape[axis] > 0]
@@ -955,8 +1019,8 @@ def _concat_compat(to_concat, axis=0):
# Unicode consolidation
# ---------------------
#
-# pprinting utility functions for generating Unicode text or bytes(3.x)/str(2.x)
-# representations of objects.
+# pprinting utility functions for generating Unicode text or
+# bytes(3.x)/str(2.x) representations of objects.
# Try to use these as much as possible rather then rolling your own.
#
# When to use
@@ -973,21 +1037,24 @@ def _concat_compat(to_concat, axis=0):
# console_encode() should (hopefully) choose the right encoding for you
# based on the encoding set in fmt.print_config.encoding.
#
-# 3) if you need to write something out to file, use pprint_thing_encoded(encoding).
+# 3) if you need to write something out to file, use
+# pprint_thing_encoded(encoding).
#
-# If no encoding is specified, it defaults to utf-8. SInce encoding pure ascii with
-# utf-8 is a no-op you can safely use the default utf-8 if you're working with
-# straight ascii.
+# If no encoding is specified, it defaults to utf-8. Since encoding pure
+# ascii with utf-8 is a no-op you can safely use the default utf-8 if you're
+# working with straight ascii.
-def _pprint_seq(seq,_nest_lvl=0):
+
+def _pprint_seq(seq, _nest_lvl=0):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
rather then calling this directly.
"""
- fmt=u"[%s]" if hasattr(seq,'__setitem__') else u"(%s)"
- return fmt % ", ".join(pprint_thing(e,_nest_lvl+1) for e in seq)
+ fmt = u"[%s]" if hasattr(seq, '__setitem__') else u"(%s)"
+ return fmt % ", ".join(pprint_thing(e, _nest_lvl + 1) for e in seq)
+
-def pprint_thing(thing,_nest_lvl=0):
+def pprint_thing(thing, _nest_lvl=0):
"""
This function is the sanctioned way of converting objects
to a unicode representation.
@@ -1011,7 +1078,7 @@ def pprint_thing(thing,_nest_lvl=0):
if thing is None:
result = ''
elif _is_sequence(thing) and _nest_lvl < print_config.pprint_nest_depth:
- result = _pprint_seq(thing,_nest_lvl)
+ result = _pprint_seq(thing, _nest_lvl)
else:
# when used internally in the package, everything
# passed in should be a unicode object or have a unicode
@@ -1021,17 +1088,19 @@ def pprint_thing(thing,_nest_lvl=0):
# so we resort to utf-8 with replacing errors
try:
- result = unicode(thing) # we should try this first
+ result = unicode(thing) # we should try this first
except UnicodeDecodeError:
# either utf-8 or we replace errors
- result = str(thing).decode('utf-8',"replace")
+ result = str(thing).decode('utf-8', "replace")
- return unicode(result) # always unicode
+ return unicode(result) # always unicode
-def pprint_thing_encoded(object,encoding='utf-8',errors='replace'):
- value=pprint_thing(object) # get unicode representation of object
+
+def pprint_thing_encoded(object, encoding='utf-8', errors='replace'):
+ value = pprint_thing(object) # get unicode representation of object
return value.encode(encoding, errors)
+
def console_encode(object):
from pandas.core.format import print_config
"""
@@ -1041,4 +1110,4 @@ def console_encode(object):
set in print_config.encoding. Use this everywhere
where you output to the console.
"""
- return pprint_thing_encoded(object,print_config.encoding)
+ return pprint_thing_encoded(object, print_config.encoding)
diff --git a/pandas/core/daterange.py b/pandas/core/daterange.py
index 4bf6ee5a1517e..bfed7fcc6a734 100644
--- a/pandas/core/daterange.py
+++ b/pandas/core/daterange.py
@@ -37,7 +37,7 @@ def __setstate__(self, aug_state):
# for backwards compatibility
if len(aug_state) > 2:
tzinfo = aug_state[2]
- else: # pragma: no cover
+ else: # pragma: no cover
tzinfo = None
self.offset = offset
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 3bc3792200fc0..21528769648f5 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -56,6 +56,7 @@
-------
formatted : string (or unicode, depending on data and options)"""
+
class SeriesFormatter(object):
def __init__(self, series, buf=None, header=True, length=True,
@@ -152,6 +153,7 @@ def _strlen(x):
except UnicodeError:
return len(x)
+
class DataFrameFormatter(object):
"""
Render a DataFrame
@@ -243,9 +245,9 @@ def make_unicode(x):
return x.decode('utf-8')
strcols = map(lambda col: map(make_unicode, col), strcols)
else:
- # generally everything is plain strings, which has ascii
- # encoding. problem is when there is a char with value over 127
- # - everything then gets converted to unicode.
+ # Generally everything is plain strings, which has ascii
+ # encoding. Problem is when there is a char with value over
+ # 127. Everything then gets converted to unicode.
try:
map(lambda col: map(str, col), strcols)
except UnicodeError:
@@ -299,7 +301,7 @@ def to_latex(self, force_unicode=False, column_format=None):
nlevels = frame.index.nlevels
for i, row in enumerate(izip(*strcols)):
if i == nlevels:
- self.buf.write('\\hline\n') # End of header
+ self.buf.write('\\hline\n') # End of header
crow = [(x.replace('_', '\\_')
.replace('%', '\\%')
.replace('&', '\\&') if x else '{}') for x in row]
@@ -424,6 +426,7 @@ def __init__(self, formatter, classes=None):
_bold_row = self.fmt.kwds.get('bold_rows', False)
_temp = '<strong>%s</strong>'
+
def _maybe_bold_row(x):
if _bold_row:
return ([_temp % y for y in x] if isinstance(x, tuple)
@@ -432,7 +435,6 @@ def _maybe_bold_row(x):
return x
self._maybe_bold_row = _maybe_bold_row
-
def write(self, s, indent=0):
self.elements.append(' ' * indent + _str(s))
@@ -474,7 +476,7 @@ def write_result(self, buf):
indent = 0
frame = self.frame
- _classes = ['dataframe'] # Default class.
+ _classes = ['dataframe'] # Default class.
if self.classes is not None:
if isinstance(self.classes, str):
self.classes = self.classes.split()
@@ -485,12 +487,12 @@ def write_result(self, buf):
indent)
if len(frame.columns) == 0 or len(frame.index) == 0:
- self.write('<tbody>', indent + self.indent_delta)
+ self.write('<tbody>', indent + self.indent_delta)
self.write_tr([repr(frame.index),
'Empty %s' % type(frame).__name__],
indent + (2 * self.indent_delta),
self.indent_delta)
- self.write('</tbody>', indent + self.indent_delta)
+ self.write('</tbody>', indent + self.indent_delta)
else:
indent += self.indent_delta
indent = self._write_header(indent)
@@ -535,10 +537,10 @@ def _column_header():
levels = self.columns.format(sparsify=True, adjoin=False,
names=False)
- col_values = self.columns.values
level_lengths = _get_level_lengths(levels)
- for lnum, (records, values) in enumerate(zip(level_lengths, levels)):
+ for lnum, (records, values) in enumerate(
+ zip(level_lengths, levels)):
name = self.columns.names[lnum]
row = ['' if name is None else str(name)]
@@ -659,6 +661,7 @@ def _get_level_lengths(levels):
def _make_grouper():
record = {'count': 0}
+
def grouper(x):
if x != '':
record['count'] += 1
@@ -771,6 +774,7 @@ def _format(x):
return fmt_values
+
class FloatArrayFormatter(GenericArrayFormatter):
"""
@@ -797,7 +801,7 @@ def get_result(self):
if len(fmt_values) > 0:
maxlen = max(len(x) for x in fmt_values)
else:
- maxlen =0
+ maxlen = 0
too_long = maxlen > self.digits + 5
@@ -805,7 +809,7 @@ def get_result(self):
# this is pretty arbitrary for now
has_large_values = (abs_vals > 1e8).any()
- has_small_values = ((abs_vals < 10**(-self.digits)) &
+ has_small_values = ((abs_vals < 10 ** (-self.digits)) &
(abs_vals > 0)).any()
if too_long and has_large_values:
@@ -842,6 +846,7 @@ def get_result(self):
fmt_values = [formatter(x) for x in self.values]
return _make_fixed_width(fmt_values, self.justify)
+
def _format_datetime64(x, tz=None):
if isnull(x):
return 'NaT'
@@ -882,6 +887,7 @@ def just(x):
return [just(x) for x in strings]
+
def _trim_zeros(str_floats, na_rep='NaN'):
"""
Trims zeros and decimal points
@@ -912,6 +918,7 @@ def single_column_table(column, align=None, style=None):
table += '</tbody></table>'
return table
+
def single_row_table(row): # pragma: no cover
table = '<table><tbody><tr>'
for i in row:
@@ -919,6 +926,7 @@ def single_row_table(row): # pragma: no cover
table += '</tr></tbody></table>'
return table
+
def _has_names(index):
if isinstance(index, MultiIndex):
return any([x is not None for x in index.names])
@@ -926,10 +934,10 @@ def _has_names(index):
return index.name is not None
-
-#-------------------------------------------------------------------------------
+#------------------------------------------------------------------------------
# Global formatting options
+
def set_printoptions(precision=None, column_space=None, max_rows=None,
max_columns=None, colheader_justify=None,
max_colwidth=None, notebook_repr_html=None,
@@ -985,9 +993,11 @@ def set_printoptions(precision=None, column_space=None, max_rows=None,
if encoding is not None:
print_config.encoding = encoding
+
def reset_printoptions():
print_config.reset()
+
class EngFormatter(object):
"""
Formats float values according to engineering format.
@@ -1052,7 +1062,7 @@ def __call__(self, num):
dnum = -dnum
if dnum != 0:
- pow10 = decimal.Decimal(int(math.floor(dnum.log10()/3)*3))
+ pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) * 3))
else:
pow10 = decimal.Decimal(0)
@@ -1068,16 +1078,17 @@ def __call__(self, num):
else:
prefix = 'E+%02d' % int_pow10
- mant = sign*dnum/(10**pow10)
+ mant = sign * dnum / (10 ** pow10)
if self.accuracy is None: # pragma: no cover
format_str = u"% g%s"
else:
- format_str = (u"%% .%if%%s" % self.accuracy )
+ format_str = (u"%% .%if%%s" % self.accuracy)
formatted = format_str % (mant, prefix)
- return formatted #.strip()
+ return formatted #.strip()
+
def set_eng_float_format(precision=None, accuracy=3, use_eng_prefix=False):
"""
@@ -1087,10 +1098,10 @@ def set_eng_float_format(precision=None, accuracy=3, use_eng_prefix=False):
See also EngFormatter.
"""
- if precision is not None: # pragma: no cover
+ if precision is not None: # pragma: no cover
import warnings
warnings.warn("'precision' parameter in set_eng_float_format is "
- "being renamed to 'accuracy'" , FutureWarning)
+ "being renamed to 'accuracy'", FutureWarning)
accuracy = precision
print_config.float_format = EngFormatter(accuracy, use_eng_prefix)
@@ -1126,17 +1137,17 @@ def detect_encoding(self):
encoding = None
try:
- encoding=sys.stdin.encoding
+ encoding = sys.stdin.encoding
except AttributeError:
pass
- if not encoding or encoding =='ascii': # try again for something better
+ if not encoding or encoding == 'ascii': # try again for better
try:
encoding = locale.getpreferredencoding()
except Exception:
pass
- if not encoding: # when all else fails. this will usually be "ascii"
+ if not encoding: # when all else fails. this will usually be "ascii"
encoding = sys.getdefaultencoding()
return encoding
@@ -1162,7 +1173,7 @@ def _put_lines(buf, lines):
599502.4276, 620921.8593, 620898.5294, 552427.1093,
555221.2193, 519639.7059, 388175.7 , 379199.5854,
614898.25 , 504833.3333, 560600. , 941214.2857,
- 1134250. , 1219550. , 855736.85 , 1042615.4286,
+ 1134250. , 1219550. , 855736.85 , 1042615.4286,
722621.3043, 698167.1818, 803750. ])
fmt = FloatArrayFormatter(arr, digits=7)
print fmt.get_result()
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index c18010ab9578e..06a290b6edfaf 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -24,8 +24,8 @@
import numpy as np
import numpy.ma as ma
-from pandas.core.common import (isnull, notnull, PandasError, _try_sort,\
- _default_index,_is_sequence)
+from pandas.core.common import (isnull, notnull, PandasError, _try_sort,
+ _default_index, _is_sequence)
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import _NDFrameIndexer, _maybe_droplevels
@@ -170,11 +170,14 @@
# Custom error class for update
-class DataConflictError(Exception): pass
+
+class DataConflictError(Exception):
+ pass
#----------------------------------------------------------------------
# Factory helper methods
+
def _arith_method(op, name, default_axis='columns'):
def na_op(x, y):
try:
@@ -228,6 +231,7 @@ def f(self, other, axis=default_axis, level=None, fill_value=None):
return f
+
def _flex_comp_method(op, name, default_axis='columns'):
def na_op(x, y):
@@ -733,7 +737,9 @@ def dot(self, other):
lvals = left.values
rvals = right.values
if isinstance(other, DataFrame):
- return DataFrame(np.dot(lvals, rvals), index=self.index, columns=other.columns)
+ return DataFrame(np.dot(lvals, rvals),
+ index=self.index,
+ columns=other.columns)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
else:
@@ -798,8 +804,8 @@ def to_dict(self, outtype='dict'):
elif outtype.lower().startswith('l'):
return dict((k, v.tolist()) for k, v in self.iteritems())
elif outtype.lower().startswith('s'):
- return dict((k, v) for k,v in self.iteritems())
- else: # pragma: no cover
+ return dict((k, v) for k, v in self.iteritems())
+ else: # pragma: no cover
raise ValueError("outtype %s not understood" % outtype)
@classmethod
@@ -833,7 +839,8 @@ def from_records(cls, data, index=None, exclude=None, columns=None,
columns = list(columns)
if len(algos.unique(columns)) < len(columns):
- raise ValueError('Non-unique columns not yet supported in from_records')
+ raise ValueError('Non-unique columns not yet supported in '
+ 'from_records')
if names is not None: # pragma: no cover
columns = names
@@ -1166,7 +1173,6 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
f = com._get_handle(path_or_buf, mode, encoding=encoding)
close = True
-
if quoting is None:
quoting = csv.QUOTE_MINIMAL
@@ -2019,7 +2025,7 @@ def xs(self, key, axis=0, level=None, copy=True):
new_values = self._data.fast_2d_xs(loc, copy=copy)
return Series(new_values, index=self.columns,
name=self.index[loc])
- else: # isinstance(loc, slice) or loc.dtype == np.bool_:
+ else: # isinstance(loc, slice) or loc.dtype == np.bool_:
result = self[loc]
result.index = new_index
return result
@@ -2488,15 +2494,15 @@ def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
- If the columns have multiple levels, determines how the other levels
- are named. If None then the index name is repeated.
+ If the columns have multiple levels, determines how the other
+ levels are named. If None then the index name is repeated.
Returns
-------
resetted : DataFrame
"""
if inplace:
- new_obj = self
+ new_obj = self
else:
new_obj = self.copy()
@@ -2786,7 +2792,7 @@ def sort(self, columns=None, column=None, axis=0, ascending=True,
-------
sorted : DataFrame
"""
- if column is not None: # pragma: no cover
+ if column is not None: # pragma: no cover
import warnings
warnings.warn("column is deprecated, use columns", FutureWarning)
columns = column
@@ -3048,7 +3054,7 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
return self
if isinstance(to_replace, dict):
- if isinstance(value, dict): # {'A' : np.nan} -> {'A' : 0}
+ if isinstance(value, dict): # {'A' : np.nan} -> {'A' : 0}
return self._replace_both_dict(to_replace, value, inplace)
elif not isinstance(value, (list, np.ndarray)):
@@ -3067,7 +3073,7 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
new_data = self._data if inplace else self.copy()._data
new_data._replace_list(to_replace, value)
- else: # [np.nan, ''] -> 0
+ else: # [np.nan, ''] -> 0
new_data = self._data.replace(to_replace, value,
inplace=inplace)
@@ -3077,9 +3083,9 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
else:
return self._constructor(new_data)
else:
- if isinstance(value, dict): # np.nan -> {'A' : 0, 'B' : -1}
+ if isinstance(value, dict): # np.nan -> {'A' : 0, 'B' : -1}
return self._replace_dest_dict(to_replace, value, inplace)
- elif not isinstance(value, (list, np.ndarray)): # np.nan -> 0
+ elif not isinstance(value, (list, np.ndarray)): # np.nan -> 0
new_data = self._data.replace(to_replace, value,
inplace=inplace)
if inplace:
@@ -3089,7 +3095,7 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
return self._constructor(new_data)
raise ValueError('Invalid to_replace type: %s' %
- type(to_replace)) # pragma: no cover
+ type(to_replace)) # pragma: no cover
def _interpolate(self, to_replace, method, axis, inplace, limit):
if self._is_mixed_type and axis == 1:
@@ -3833,7 +3839,7 @@ def _apply_standard(self, func, axis, ignore_failures=False):
if hasattr(e, 'args'):
k = res_index[i]
e.args = e.args + ('occurred at index %s' % str(k),)
- except NameError: # pragma: no cover
+ except NameError: # pragma: no cover
# no k defined yet
pass
raise
@@ -4076,7 +4082,7 @@ def corr(self, method='pearson'):
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
- for j, bc in enumerate(mat):
+ for j, bc in enumerate(mat):
valid = mask[i] & mask[j]
if not valid.any():
c = np.nan
@@ -4183,7 +4189,7 @@ def describe(self, percentile_width=50):
for k, v in self.iteritems()),
columns=self.columns)
- lb = .5 * (1. - percentile_width/100.)
+ lb = .5 * (1. - percentile_width / 100.)
ub = 1. - lb
def pretty_name(x):
@@ -4448,7 +4454,6 @@ def skew(self, axis=0, skipna=True, level=None):
return self._reduce(nanops.nanskew, axis=axis, skipna=skipna,
numeric_only=None)
-
@Substitution(name='unbiased kurtosis', shortname='kurt',
na_action=_doc_exclude_na, extras='')
@Appender(_stat_doc)
@@ -4971,6 +4976,7 @@ def _to_sdict(data, columns, coerce_float=False):
data = map(tuple, data)
return _list_to_sdict(data, columns, coerce_float=coerce_float)
+
def _list_to_sdict(data, columns, coerce_float=False):
if len(data) > 0 and isinstance(data[0], tuple):
content = list(lib.to_object_array_tuples(data).T)
@@ -4984,6 +4990,7 @@ def _list_to_sdict(data, columns, coerce_float=False):
return _convert_object_array(content, columns,
coerce_float=coerce_float)
+
def _list_of_series_to_sdict(data, columns, coerce_float=False):
from pandas.core.index import _get_combined_index
@@ -5038,6 +5045,7 @@ def _convert_object_array(content, columns, coerce_float=False):
for c, vals in zip(columns, content))
return sdict, columns
+
def _get_names_from_index(data):
index = range(len(data))
has_some_name = any([s.name is not None for s in data])
@@ -5055,6 +5063,7 @@ def _get_names_from_index(data):
return index
+
def _homogenize(data, index, columns, dtype=None):
from pandas.core.series import _sanitize_array
@@ -5109,6 +5118,7 @@ def _homogenize(data, index, columns, dtype=None):
def _put_str(s, space):
return ('%s' % s)[:space].ljust(space)
+
def install_ipython_completers(): # pragma: no cover
"""Register the DataFrame type with IPython's tab completion machinery, so
that it knows about accessing column names as attributes."""
@@ -5136,6 +5146,7 @@ def complete_dataframe(obj, prev_completions):
DataFrame.plot = gfx.plot_frame
DataFrame.hist = gfx.hist_frame
+
def boxplot(self, column=None, by=None, ax=None, fontsize=None,
rot=0, grid=True, **kwds):
"""
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index f0c70522da8a0..97b10e532c9de 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8,6 +8,7 @@
from pandas.tseries.offsets import DateOffset
import pandas.core.common as com
+
class PandasError(Exception):
pass
@@ -15,8 +16,8 @@ class PandasError(Exception):
class PandasObject(object):
_AXIS_NUMBERS = {
- 'index' : 0,
- 'columns' : 1
+ 'index': 0,
+ 'columns': 1
}
_AXIS_ALIASES = {}
@@ -274,7 +275,7 @@ def select(self, crit, axis=0):
else:
new_axis = axis
- return self.reindex(**{axis_name : new_axis})
+ return self.reindex(**{axis_name: new_axis})
def drop(self, labels, axis=0, level=None):
"""
@@ -300,7 +301,7 @@ def drop(self, labels, axis=0, level=None):
else:
new_axis = axis.drop(labels)
- return self.reindex(**{axis_name : new_axis})
+ return self.reindex(**{axis_name: new_axis})
def sort_index(self, axis=0, ascending=True):
"""
@@ -326,7 +327,7 @@ def sort_index(self, axis=0, ascending=True):
sort_index = sort_index[::-1]
new_axis = labels.take(sort_index)
- return self.reindex(**{axis_name : new_axis})
+ return self.reindex(**{axis_name: new_axis})
@property
def ix(self):
@@ -483,13 +484,13 @@ def _clear_item_cache(self):
self._item_cache.clear()
def _set_item(self, key, value):
- if hasattr(self,'columns') and isinstance(self.columns, MultiIndex):
+ if hasattr(self, 'columns') and isinstance(self.columns, MultiIndex):
# Pad the key with empty strings if lower levels of the key
# aren't specified:
if not isinstance(key, tuple):
key = (key,)
if len(key) != self.columns.nlevels:
- key += ('',)*(self.columns.nlevels - len(key))
+ key += ('',) * (self.columns.nlevels - len(key))
self._data.set(key, value)
try:
@@ -504,7 +505,7 @@ def __delitem__(self, key):
deleted = False
maybe_shortcut = False
- if hasattr(self,'columns') and isinstance(self.columns, MultiIndex):
+ if hasattr(self, 'columns') and isinstance(self.columns, MultiIndex):
try:
maybe_shortcut = key not in self.columns._engine
except TypeError:
@@ -513,10 +514,10 @@ def __delitem__(self, key):
if maybe_shortcut:
# Allow shorthand to delete all columns whose first len(key)
# elements match key:
- if not isinstance(key,tuple):
+ if not isinstance(key, tuple):
key = (key,)
for col in self.columns:
- if isinstance(col,tuple) and col[:len(key)] == key:
+ if isinstance(col, tuple) and col[:len(key)] == key:
del self[col]
deleted = True
if not deleted:
@@ -702,7 +703,7 @@ def cummax(self, axis=None, skipna=True):
if skipna:
np.putmask(result, mask, np.nan)
else:
- result = np.maximum.accumulate(y,axis)
+ result = np.maximum.accumulate(y, axis)
return self._wrap_array(result, self.axes, copy=False)
def cummin(self, axis=None, skipna=True):
@@ -738,7 +739,7 @@ def cummin(self, axis=None, skipna=True):
if skipna:
np.putmask(result, mask, np.nan)
else:
- result = np.minimum.accumulate(y,axis)
+ result = np.minimum.accumulate(y, axis)
return self._wrap_array(result, self.axes, copy=False)
def copy(self, deep=True):
@@ -934,6 +935,7 @@ def tz_localize(self, tz, axis=0, copy=True):
# Good for either Series or DataFrame
+
def truncate(self, before=None, after=None, copy=True):
"""Function truncate a sorted DataFrame / Series before and/or after
some particular dates.
@@ -965,4 +967,3 @@ def truncate(self, before=None, after=None, copy=True):
result = result.copy()
return result
-
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index f0f6f7b2a8c63..e158e164e4a3b 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -18,12 +18,15 @@
class GroupByError(Exception):
pass
+
class DataError(GroupByError):
pass
+
class SpecificationError(GroupByError):
pass
+
def _groupby_function(name, alias, npfunc, numeric_only=True):
def f(self):
try:
@@ -36,6 +39,7 @@ def f(self):
return f
+
def _first_compat(x, axis=0):
x = np.asarray(x)
x = x[com.notnull(x)]
@@ -43,6 +47,7 @@ def _first_compat(x, axis=0):
return np.nan
return x[0]
+
def _last_compat(x, axis=0):
x = np.asarray(x)
x = x[com.notnull(x)]
@@ -166,7 +171,7 @@ def indices(self):
@property
def name(self):
if self._selection is None:
- return None # 'result'
+ return None # 'result'
else:
return self._selection
@@ -205,6 +210,7 @@ def wrapper(*args, **kwargs):
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
+
def curried(x):
return f(x, *args, **kwargs)
@@ -458,6 +464,7 @@ def _concat_objects(self, keys, values, not_indexed_same=False):
return result
+
def _generate_groups(obj, group_index, ngroups, axis=0):
if isinstance(obj, NDFrame) and not isinstance(obj, DataFrame):
factory = obj._constructor
@@ -468,23 +475,26 @@ def _generate_groups(obj, group_index, ngroups, axis=0):
return generate_groups(obj, group_index, ngroups,
axis=axis, factory=factory)
+
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
- else: # pragma: no cover
+ else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
+
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
+
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
@@ -495,6 +505,7 @@ def _is_indexed_like(obj, axes):
return False
+
class Grouper(object):
"""
@@ -531,7 +542,7 @@ def get_iterator(self, data, axis=0):
groups = indices.keys()
try:
groups = sorted(groups)
- except Exception: # pragma: no cover
+ except Exception: # pragma: no cover
pass
for name in groups:
@@ -658,29 +669,29 @@ def get_group_levels(self):
# Aggregation functions
_cython_functions = {
- 'add' : lib.group_add,
- 'prod' : lib.group_prod,
- 'min' : lib.group_min,
- 'max' : lib.group_max,
- 'mean' : lib.group_mean,
- 'median' : lib.group_median,
- 'var' : lib.group_var,
- 'std' : lib.group_var,
+ 'add': lib.group_add,
+ 'prod': lib.group_prod,
+ 'min': lib.group_min,
+ 'max': lib.group_max,
+ 'mean': lib.group_mean,
+ 'median': lib.group_median,
+ 'var': lib.group_var,
+ 'std': lib.group_var,
'first': lambda a, b, c, d: lib.group_nth(a, b, c, d, 1),
'last': lib.group_last
}
_cython_object_functions = {
- 'first' : lambda a, b, c, d: lib.group_nth_object(a, b, c, d, 1),
- 'last' : lib.group_last_object
+ 'first': lambda a, b, c, d: lib.group_nth_object(a, b, c, d, 1),
+ 'last': lib.group_last_object
}
_cython_transforms = {
- 'std' : np.sqrt
+ 'std': np.sqrt
}
_cython_arity = {
- 'ohlc' : 4, # OHLC
+ 'ohlc': 4, # OHLC
}
_name_functions = {}
@@ -840,18 +851,17 @@ def generate_bins_generic(values, binner, closed):
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
- if values[lenidx-1] > binner[lenbin-1]:
+ if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
- bins = np.empty(lenbin - 1, dtype=np.int64)
+ bins = np.empty(lenbin - 1, dtype=np.int64)
- j = 0 # index into values
- bc = 0 # bin count
+ j = 0 # index into values
+ bc = 0 # bin count
- # linear scan, presume nothing about values/binner except that it
- # fits ok
- for i in range(0, lenbin-1):
- r_bin = binner[i+1]
+ # linear scan, presume nothing about values/binner except that it fits ok
+ for i in range(0, lenbin - 1):
+ r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
@@ -921,25 +931,25 @@ def names(self):
# cython aggregation
_cython_functions = {
- 'add' : lib.group_add_bin,
- 'prod' : lib.group_prod_bin,
- 'mean' : lib.group_mean_bin,
- 'min' : lib.group_min_bin,
- 'max' : lib.group_max_bin,
- 'var' : lib.group_var_bin,
- 'std' : lib.group_var_bin,
- 'ohlc' : lib.group_ohlc,
+ 'add': lib.group_add_bin,
+ 'prod': lib.group_prod_bin,
+ 'mean': lib.group_mean_bin,
+ 'min': lib.group_min_bin,
+ 'max': lib.group_max_bin,
+ 'var': lib.group_var_bin,
+ 'std': lib.group_var_bin,
+ 'ohlc': lib.group_ohlc,
'first': lambda a, b, c, d: lib.group_nth_bin(a, b, c, d, 1),
'last': lib.group_last_bin
}
_cython_object_functions = {
- 'first' : lambda a, b, c, d: lib.group_nth_bin_object(a, b, c, d, 1),
- 'last' : lib.group_last_bin_object
+ 'first': lambda a, b, c, d: lib.group_nth_bin_object(a, b, c, d, 1),
+ 'last': lib.group_last_bin_object
}
_name_functions = {
- 'ohlc' : lambda *args: ['open', 'high', 'low', 'close']
+ 'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
_filter_empty_groups = True
@@ -1105,6 +1115,7 @@ def _make_labels(self):
self._counts = counts
_groups = None
+
@property
def groups(self):
if self._groups is None:
@@ -1184,9 +1195,11 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True):
return grouper, exclusions
+
def _is_label_like(val):
return isinstance(val, basestring) or np.isscalar(val)
+
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
@@ -1201,6 +1214,7 @@ def _convert_grouper(axis, grouper):
else:
return grouper
+
class SeriesGroupBy(GroupBy):
def aggregate(self, func_or_funcs, *args, **kwargs):
@@ -1257,7 +1271,7 @@ def aggregate(self, func_or_funcs, *args, **kwargs):
if isinstance(func_or_funcs, basestring):
return getattr(self, func_or_funcs)(*args, **kwargs)
- if hasattr(func_or_funcs,'__iter__'):
+ if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs)
else:
cyfunc = _intercept_cython(func_or_funcs)
@@ -1393,6 +1407,7 @@ def transform(self, func, *args, **kwargs):
return result
+
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
@@ -1686,7 +1701,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if (isinstance(values[0], Series) and
not _all_indexes_same([x.index for x in values])):
return self._concat_objects(keys, values,
- not_indexed_same=not_indexed_same)
+ not_indexed_same=not_indexed_same)
if self.axis == 0:
stacked_values = np.vstack([np.asarray(x)
@@ -1743,7 +1758,7 @@ def transform(self, func, *args, **kwargs):
res = group.apply(wrapper, axis=self.axis)
except TypeError:
return self._transform_item_by_item(obj, wrapper)
- except Exception: # pragma: no cover
+ except Exception: # pragma: no cover
res = wrapper(group)
# broadcasting
@@ -1892,6 +1907,7 @@ def _wrap_agged_blocks(self, blocks):
from pandas.tools.plotting import boxplot_frame_groupby
DataFrameGroupBy.boxplot = boxplot_frame_groupby
+
class PanelGroupBy(NDFrameGroupBy):
def _iterate_slices(self):
@@ -2023,6 +2039,7 @@ def _get_slice(slob):
assert(start < end)
yield i, _get_slice(slice(start, end))
+
def get_group_index(label_list, shape):
"""
For the particular label_list, gets the offsets into the hypothetical list
@@ -2036,7 +2053,7 @@ def get_group_index(label_list, shape):
group_index = np.zeros(n, dtype=np.int64)
mask = np.zeros(n, dtype=bool)
for i in xrange(len(shape)):
- stride = np.prod([x for x in shape[i+1:]], dtype=np.int64)
+ stride = np.prod([x for x in shape[i + 1:]], dtype=np.int64)
group_index += com._ensure_int64(label_list[i]) * stride
mask |= label_list[i] < 0
@@ -2051,6 +2068,7 @@ def _int64_overflow_possible(shape):
return the_prod >= _INT64_MAX
+
def decons_group_index(comp_labels, shape):
# reconstruct labels
label_list = []
@@ -2099,6 +2117,7 @@ def _lexsort_indexer(keys):
shape.append(len(rizer.uniques))
return _indexer_from_factorized(labels, shape)
+
class _KeyMapper(object):
"""
Ease my suffering. Map compressed group id -> key tuple
@@ -2139,6 +2158,7 @@ def _get_indices_dict(label_list, keys):
#----------------------------------------------------------------------
# sorting levels...cleverly?
+
def _compress_group_index(group_index, sort=True):
"""
Group_index is offsets into cartesian product of all possible labels. This
@@ -2162,6 +2182,7 @@ def _compress_group_index(group_index, sort=True):
return comp_ids, obs_group_ids
+
def _reorder_by_uniques(uniques, labels):
# sorter is index where elements ought to go
sorter = uniques.argsort()
@@ -2196,15 +2217,19 @@ def _reorder_by_uniques(uniques, labels):
np.var: 'var'
}
+
def _intercept_function(func):
return _func_table.get(func, func)
+
def _intercept_cython(func):
return _cython_table.get(func)
+
def _groupby_indices(values):
return lib.groupby_indices(com._ensure_object(values))
+
def numpy_groupby(data, labels, axis=0):
s = np.argsort(labels)
keys, inv = np.unique(labels, return_inverse=True)
@@ -2222,6 +2247,7 @@ def numpy_groupby(data, labels, axis=0):
from pandas.util import py3compat
import sys
+
def install_ipython_completers(): # pragma: no cover
"""Register the DataFrame type with IPython's tab completion machinery, so
that it knows about accessing column names as attributes."""
@@ -2229,7 +2255,7 @@ def install_ipython_completers(): # pragma: no cover
@complete_object.when_type(DataFrameGroupBy)
def complete_dataframe(obj, prev_completions):
- return prev_completions + [c for c in obj.obj.columns \
+ return prev_completions + [c for c in obj.obj.columns
if isinstance(c, basestring) and py3compat.isidentifier(c)]
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 08d1c593d42ca..c94b3baee1f26 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -27,7 +27,7 @@ def wrapper(self, other):
result = func(other)
try:
return result.view(np.ndarray)
- except: # pragma: no cover
+ except: # pragma: no cover
return result
return wrapper
@@ -137,7 +137,7 @@ def __repr__(self):
prepr = com.pprint_thing(self)
else:
prepr = com.pprint_thing_encoded(self)
- return 'Index(%s, dtype=%s)' % (prepr,self.dtype)
+ return 'Index(%s, dtype=%s)' % (prepr, self.dtype)
def astype(self, dtype):
return Index(self.values.astype(dtype), name=self.name,
@@ -570,7 +570,7 @@ def union(self, other):
# contained in
try:
result = np.sort(self.values)
- except TypeError: # pragma: no cover
+ except TypeError: # pragma: no cover
result = self.values
# for subclasses
@@ -1027,7 +1027,7 @@ def _join_monotonic(self, other, how='left', return_indexers=False):
lidx = self._left_indexer_unique(ov, sv)
ridx = None
elif how == 'inner':
- join_index, lidx, ridx = self._inner_indexer(sv,ov)
+ join_index, lidx, ridx = self._inner_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
@@ -1229,8 +1229,6 @@ def _wrap_joined_index(self, joined, other):
return Int64Index(joined, name=name)
-
-
class MultiIndex(Index):
"""
Implements multi-level, a.k.a. hierarchical, index object for pandas
@@ -1291,11 +1289,12 @@ def __new__(cls, levels=None, labels=None, sortorder=None, names=None):
def __array_finalize__(self, obj):
"""
- Update custom MultiIndex attributes when a new array is created by numpy,
- e.g. when calling ndarray.view()
+ Update custom MultiIndex attributes when a new array is created by
+ numpy, e.g. when calling ndarray.view()
"""
if not isinstance(obj, type(self)):
- # Only relevant if this array is being created from an Index instance.
+ # Only relevant if this array is being created from an Index
+ # instance.
return
self.levels = list(getattr(obj, 'levels', []))
@@ -1345,7 +1344,7 @@ def _from_elements(values, labels=None, levels=None, names=None,
index = values.view(MultiIndex)
index.levels = levels
index.labels = labels
- index.names = names
+ index.names = names
index.sortorder = sortorder
return index
@@ -1412,7 +1411,7 @@ def has_duplicates(self):
shape = [len(lev) for lev in self.levels]
group_index = np.zeros(len(self), dtype='i8')
for i in xrange(len(shape)):
- stride = np.prod([x for x in shape[i+1:]], dtype='i8')
+ stride = np.prod([x for x in shape[i + 1:]], dtype='i8')
group_index += self.labels[i] * stride
if len(np.unique(group_index)) < len(group_index):
@@ -1587,7 +1586,7 @@ def from_tuples(cls, tuples, sortorder=None, names=None):
if isinstance(tuples, np.ndarray):
if isinstance(tuples, Index):
- tuples = tuples.values
+ tuples = tuples.values
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
@@ -2430,10 +2429,12 @@ def _ensure_index(index_like):
return Index(index_like)
+
def _validate_join_method(method):
if method not in ['left', 'right', 'inner', 'outer']:
raise Exception('do not recognize join method %s' % method)
+
# TODO: handle index names!
def _get_combined_index(indexes, intersect=False):
indexes = _get_distinct_indexes(indexes)
@@ -2507,6 +2508,7 @@ def _sanitize_and_check(indexes):
else:
return indexes, 'array'
+
def _handle_legacy_indexes(indexes):
from pandas.core.daterange import DateRange
from pandas.tseries.index import DatetimeIndex
@@ -2526,6 +2528,7 @@ def _handle_legacy_indexes(indexes):
return converted
+
def _get_consensus_names(indexes):
consensus_name = indexes[0].names
for index in indexes[1:]:
@@ -2534,6 +2537,7 @@ def _get_consensus_names(indexes):
break
return consensus_name
+
def _maybe_box(idx):
from pandas.tseries.api import DatetimeIndex, PeriodIndex
klasses = DatetimeIndex, PeriodIndex
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 13fa0b2af1adc..531431c065082 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -10,6 +10,7 @@
# "null slice"
_NS = slice(None, None)
+
def _is_sequence(x):
try:
iter(x)
@@ -18,6 +19,7 @@ def _is_sequence(x):
except Exception:
return False
+
class IndexingError(Exception):
pass
@@ -587,6 +589,7 @@ def _get_slice_axis(self, slice_obj, axis=0):
# 32-bit floating point machine epsilon
_eps = np.finfo('f4').eps
+
def _is_index_slice(obj):
def _is_valid_index(x):
return (com.is_integer(x) or com.is_float(x)
@@ -599,6 +602,7 @@ def _crit(v):
return not both_none and (_crit(obj.start) and _crit(obj.stop))
+
def _is_int_slice(obj):
def _is_valid_index(x):
return com.is_integer(x)
@@ -610,6 +614,7 @@ def _crit(v):
return not both_none and (_crit(obj.start) and _crit(obj.stop))
+
def _is_float_slice(obj):
def _is_valid_index(x):
return com.is_float(x)
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index c0f2ba7654e80..9a1785b9518af 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -12,11 +12,13 @@
except ImportError: # pragma: no cover
_USE_BOTTLENECK = False
+
def _bottleneck_switch(bn_name, alt, zero_value=None, **kwargs):
try:
bn_func = getattr(bn, bn_name)
except (AttributeError, NameError): # pragma: no cover
bn_func = None
+
def f(values, axis=None, skipna=True, **kwds):
if len(kwargs) > 0:
for k, v in kwargs.iteritems():
@@ -46,6 +48,7 @@ def f(values, axis=None, skipna=True, **kwds):
return f
+
def _has_infs(result):
if isinstance(result, np.ndarray):
if result.dtype == 'f8':
@@ -57,6 +60,7 @@ def _has_infs(result):
else:
return np.isinf(result) or np.isneginf(result)
+
def nanany(values, axis=None, skipna=True):
mask = isnull(values)
@@ -65,6 +69,7 @@ def nanany(values, axis=None, skipna=True):
np.putmask(values, mask, False)
return values.any(axis)
+
def nanall(values, axis=None, skipna=True):
mask = isnull(values)
@@ -73,6 +78,7 @@ def nanall(values, axis=None, skipna=True):
np.putmask(values, mask, True)
return values.all(axis)
+
def _nansum(values, axis=None, skipna=True):
mask = isnull(values)
@@ -85,6 +91,7 @@ def _nansum(values, axis=None, skipna=True):
return the_sum
+
def _nanmean(values, axis=None, skipna=True):
mask = isnull(values)
@@ -104,6 +111,7 @@ def _nanmean(values, axis=None, skipna=True):
the_mean = the_sum / count if count > 0 else np.nan
return the_mean
+
def _nanmedian(values, axis=None, skipna=True):
def get_median(x):
mask = notnull(x)
@@ -119,6 +127,7 @@ def get_median(x):
else:
return get_median(values)
+
def _nanvar(values, axis=None, skipna=True, ddof=1):
mask = isnull(values)
@@ -135,6 +144,7 @@ def _nanvar(values, axis=None, skipna=True, ddof=1):
XX = _ensure_numeric((values ** 2).sum(axis))
return np.fabs((XX - X ** 2 / count) / (count - ddof))
+
def _nanmin(values, axis=None, skipna=True):
mask = isnull(values)
if skipna and not issubclass(values.dtype.type,
@@ -160,6 +170,7 @@ def _nanmin(values, axis=None, skipna=True):
return _maybe_null_out(result, axis, mask)
+
def _nanmax(values, axis=None, skipna=True):
mask = isnull(values)
if skipna and not issubclass(values.dtype.type,
@@ -186,6 +197,7 @@ def _nanmax(values, axis=None, skipna=True):
return _maybe_null_out(result, axis, mask)
+
def nanargmax(values, axis=None, skipna=True):
"""
Returns -1 in the NA case
@@ -198,6 +210,7 @@ def nanargmax(values, axis=None, skipna=True):
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
+
def nanargmin(values, axis=None, skipna=True):
"""
Returns -1 in the NA case
@@ -217,6 +230,7 @@ def nanargmin(values, axis=None, skipna=True):
nanmin = _bottleneck_switch('nanmin', _nanmin)
nanmax = _bottleneck_switch('nanmax', _nanmax)
+
def nanskew(values, axis=None, skipna=True):
if not isinstance(values.dtype.type, np.floating):
values = values.astype('f8')
@@ -249,6 +263,7 @@ def nanskew(values, axis=None, skipna=True):
return np.nan
return result
+
def nankurt(values, axis=None, skipna=True):
if not isinstance(values.dtype.type, np.floating):
values = values.astype('f8')
@@ -269,8 +284,8 @@ def nankurt(values, axis=None, skipna=True):
C = _zero_out_fperr(C)
D = _zero_out_fperr(D)
- result = (((count*count - 1.)*D / (B*B) - 3*((count-1.)**2)) /
- ((count - 2.)*(count-3.)))
+ result = (((count * count - 1.) * D / (B * B) - 3 * ((count - 1.) ** 2)) /
+ ((count - 2.) * (count - 3.)))
if isinstance(result, np.ndarray):
result = np.where(B == 0, 0, result)
result[count < 4] = np.nan
@@ -281,6 +296,7 @@ def nankurt(values, axis=None, skipna=True):
return np.nan
return result
+
def nanprod(values, axis=None, skipna=True):
mask = isnull(values)
if skipna and not issubclass(values.dtype.type, np.integer):
@@ -289,6 +305,7 @@ def nanprod(values, axis=None, skipna=True):
result = values.prod(axis)
return _maybe_null_out(result, axis, mask)
+
def _maybe_arg_null_out(result, axis, mask, skipna):
# helper function for nanargmin/nanargmax
if axis is None:
@@ -307,6 +324,7 @@ def _maybe_arg_null_out(result, axis, mask, skipna):
result[na_mask] = -1
return result
+
def _get_counts(mask, axis):
if axis is not None:
count = (mask.shape[axis] - mask.sum(axis)).astype(float)
@@ -315,6 +333,7 @@ def _get_counts(mask, axis):
return count
+
def _maybe_null_out(result, axis, mask):
if axis is not None:
null_mask = (mask.shape[axis] - mask.sum(axis)) == 0
@@ -328,12 +347,14 @@ def _maybe_null_out(result, axis, mask):
return result
+
def _zero_out_fperr(arg):
if isinstance(arg, np.ndarray):
return np.where(np.abs(arg) < 1e-14, 0, arg)
else:
return 0 if np.abs(arg) < 1e-14 else arg
+
def nancorr(a, b, method='pearson'):
"""
a, b: ndarrays
@@ -351,27 +372,31 @@ def nancorr(a, b, method='pearson'):
f = get_corr_func(method)
return f(a, b)
+
def get_corr_func(method):
if method in ['kendall', 'spearman']:
from scipy.stats import kendalltau, spearmanr
def _pearson(a, b):
return np.corrcoef(a, b)[0, 1]
+
def _kendall(a, b):
rs = kendalltau(a, b)
if isinstance(rs, tuple):
return rs[0]
return rs
+
def _spearman(a, b):
return spearmanr(a, b)[0]
_cor_methods = {
- 'pearson' : _pearson,
- 'kendall' : _kendall,
- 'spearman' : _spearman
+ 'pearson': _pearson,
+ 'kendall': _kendall,
+ 'spearman': _spearman
}
return _cor_methods[method]
+
def nancov(a, b):
assert(len(a) == len(b))
@@ -385,6 +410,7 @@ def nancov(a, b):
return np.cov(a, b)[0, 1]
+
def _ensure_numeric(x):
if isinstance(x, np.ndarray):
if x.dtype == np.object_:
@@ -401,6 +427,7 @@ def _ensure_numeric(x):
import operator
+
def make_nancomp(op):
def f(x, y):
xmask = isnull(x)
@@ -424,6 +451,7 @@ def f(x, y):
naneq = make_nancomp(operator.eq)
nanne = make_nancomp(operator.ne)
+
def unique1d(values):
"""
Hash table-based unique
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 211434ab07154..0efbb5284d584 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -29,7 +29,7 @@ def _ensure_like_indices(time, panels):
"""
n_time = len(time)
n_panel = len(panels)
- u_panels = np.unique(panels) # this sorts!
+ u_panels = np.unique(panels) # this sorts!
u_time = np.unique(time)
if len(u_time) == n_time:
time = np.tile(u_time, len(u_panels))
@@ -37,6 +37,7 @@ def _ensure_like_indices(time, panels):
panels = np.repeat(u_panels, len(u_time))
return time, panels
+
def panel_index(time, panels, names=['time', 'panel']):
"""
Returns a multi-index suitable for a panel-like DataFrame
@@ -84,9 +85,11 @@ def panel_index(time, panels, names=['time', 'panel']):
levels = [time_factor.levels, panel_factor.levels]
return MultiIndex(levels, labels, sortorder=None, names=names)
+
class PanelError(Exception):
pass
+
def _arith_method(func, name):
# work only for scalars
@@ -99,6 +102,7 @@ def f(self, other):
f.__name__ = name
return f
+
def _panel_arith_method(op, name):
@Substitution(op)
def f(self, other, axis='items'):
@@ -144,20 +148,20 @@ def f(self, other, axis='items'):
class Panel(NDFrame):
_AXIS_NUMBERS = {
- 'items' : 0,
- 'major_axis' : 1,
- 'minor_axis' : 2
+ 'items': 0,
+ 'major_axis': 1,
+ 'minor_axis': 2
}
_AXIS_ALIASES = {
- 'major' : 'major_axis',
- 'minor' : 'minor_axis'
+ 'major': 'major_axis',
+ 'minor': 'minor_axis'
}
_AXIS_NAMES = {
- 0 : 'items',
- 1 : 'major_axis',
- 2 : 'minor_axis'
+ 0: 'items',
+ 1: 'major_axis',
+ 2: 'minor_axis'
}
# major
@@ -223,7 +227,7 @@ def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)
copy = False
dtype = None
- else: # pragma: no cover
+ else: # pragma: no cover
raise PandasError('Panel constructor not properly called!')
NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)
@@ -259,7 +263,7 @@ def _init_dict(self, data, axes, dtype=None):
minor = _extract_axis(data, axis=1)
axes = [items, major, minor]
- reshaped_data = data.copy() # shallow
+ reshaped_data = data.copy() # shallow
item_shape = len(major), len(minor)
for item in items:
@@ -364,7 +368,6 @@ def _init_matrix(self, data, axes, dtype=None, copy=False):
block = make_block(values, items, items)
return BlockManager([block], fixed_axes)
-
#----------------------------------------------------------------------
# Array interface
@@ -561,7 +564,8 @@ def set_value(self, item, major, minor, value):
return result.set_value(item, major, minor, value)
def _box_item_values(self, key, values):
- return DataFrame(values, index=self.major_axis, columns=self.minor_axis)
+ return DataFrame(values, index=self.major_axis,
+ columns=self.minor_axis)
def __getattr__(self, name):
"""After regular attribute access, try looking up the name of an item.
@@ -617,13 +621,13 @@ def __setstate__(self, state):
# old Panel pickle
if isinstance(state, BlockManager):
self._data = state
- elif len(state) == 4: # pragma: no cover
+ elif len(state) == 4: # pragma: no cover
self._unpickle_panel_compat(state)
- else: # pragma: no cover
+ else: # pragma: no cover
raise ValueError('unrecognized pickle')
self._item_cache = {}
- def _unpickle_panel_compat(self, state): # pragma: no cover
+ def _unpickle_panel_compat(self, state): # pragma: no cover
"Unpickle the panel"
_unpickle = com._unpickle_array
vals, items, major, minor = state
@@ -999,7 +1003,7 @@ def swapaxes(self, axis1='major', axis2='minor', copy=True):
if i == j:
raise ValueError('Cannot specify the same axis')
- mapping = {i : j, j : i}
+ mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k))
for k in range(3))
@@ -1267,7 +1271,7 @@ def truncate(self, before=None, after=None, axis='major'):
beg_slice, end_slice = index.slice_locs(before, after)
new_index = index[beg_slice:end_slice]
- return self.reindex(**{axis : new_index})
+ return self.reindex(**{axis: new_index})
def join(self, other, how='left', lsuffix='', rsuffix=''):
"""
@@ -1303,8 +1307,8 @@ def join(self, other, how='left', lsuffix='', rsuffix=''):
return self._constructor(merged_data)
else:
if lsuffix or rsuffix:
- raise ValueError('Suffixes not supported when passing multiple '
- 'panels')
+ raise ValueError('Suffixes not supported when passing '
+ 'multiple panels')
if how == 'left':
how = 'outer'
@@ -1364,6 +1368,7 @@ def _get_join_index(self, other, how):
WidePanel = Panel
LongPanel = DataFrame
+
def _prep_ndarray(values, copy=True):
if not isinstance(values, np.ndarray):
values = np.asarray(values)
@@ -1376,6 +1381,7 @@ def _prep_ndarray(values, copy=True):
assert(values.ndim == 3)
return values
+
def _homogenize_dict(frames, intersect=True, dtype=None):
"""
Conform set of DataFrame-like objects to either an intersection
@@ -1446,6 +1452,7 @@ def _extract_axis(data, axis=0, intersect=False):
def _monotonic(arr):
return not (arr[1:] < arr[:-1]).any()
+
def install_ipython_completers(): # pragma: no cover
"""Register the Panel type with IPython's tab completion machinery, so
that it knows about accessing column names as attributes."""
@@ -1463,4 +1470,3 @@ def complete_dataframe(obj, prev_completions):
install_ipython_completers()
except Exception:
pass
-
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index d8c9087a34437..044870fc4a6f2 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -74,7 +74,7 @@ def __init__(self, values, index, level=-1, value_columns=None):
v = self.level
lshape = self.index.levshape
- self.full_shape = np.prod(lshape[:v] + lshape[v+1:]), lshape[v]
+ self.full_shape = np.prod(lshape[:v] + lshape[v + 1:]), lshape[v]
self._make_sorted_values_labels()
self._make_selectors()
@@ -84,8 +84,8 @@ def _make_sorted_values_labels(self):
labs = self.index.labels
levs = self.index.levels
- to_sort = labs[:v] + labs[v+1:] + [labs[v]]
- sizes = [len(x) for x in levs[:v] + levs[v+1:] + [levs[v]]]
+ to_sort = labs[:v] + labs[v + 1:] + [labs[v]]
+ sizes = [len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]]]
group_index = get_group_index(to_sort, sizes)
max_groups = np.prod(sizes)
@@ -93,7 +93,7 @@ def _make_sorted_values_labels(self):
comp_index, obs_ids = _compress_group_index(group_index)
ngroups = len(obs_ids)
else:
- comp_index, ngroups = group_index, max_groups
+ comp_index, ngroups = group_index, max_groups
indexer = lib.groupsort_indexer(comp_index, ngroups)[0]
indexer = _ensure_platform_int(indexer)
@@ -280,6 +280,7 @@ def _unstack_multiple(data, clocs):
return unstacked
+
def pivot(self, index=None, columns=None, values=None):
"""
See DataFrame.pivot
@@ -292,6 +293,7 @@ def pivot(self, index=None, columns=None, values=None):
index=[self[index], self[columns]])
return indexed.unstack(columns)
+
def pivot_simple(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
@@ -324,6 +326,7 @@ def pivot_simple(index, columns, values):
series = series.sortlevel(0)
return series.unstack()
+
def _slow_pivot(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
@@ -349,6 +352,7 @@ def _slow_pivot(index, columns, values):
return DataFrame(tree)
+
def unstack(obj, level):
if isinstance(level, (tuple, list)):
return _unstack_multiple(obj, level)
@@ -362,11 +366,12 @@ def unstack(obj, level):
unstacker = _Unstacker(obj.values, obj.index, level=level)
return unstacker.get_result()
+
def _unstack_frame(obj, level):
from pandas.core.internals import BlockManager, make_block
if obj._is_mixed_type:
- unstacker = _Unstacker(np.empty(obj.shape, dtype=bool), # dummy
+ unstacker = _Unstacker(np.empty(obj.shape, dtype=bool), # dummy
obj.index, level=level,
value_columns=obj.columns)
new_columns = unstacker.get_new_columns()
@@ -395,6 +400,7 @@ def _unstack_frame(obj, level):
value_columns=obj.columns)
return unstacker.get_result()
+
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
@@ -437,6 +443,7 @@ def stack(frame, level=-1, dropna=True):
new_index = new_index[mask]
return Series(new_values, index=new_index)
+
def _stack_multi_columns(frame, level=-1, dropna=True):
this = frame.copy()
@@ -491,7 +498,7 @@ def _stack_multi_columns(frame, level=-1, dropna=True):
else:
new_levels = [this.index]
new_labels = [np.arange(N).repeat(levsize)]
- new_names = [this.index.name] # something better?
+ new_names = [this.index.name] # something better?
new_levels.append(frame.columns.levels[level])
new_labels.append(np.tile(np.arange(levsize), N))
@@ -704,8 +711,8 @@ def make_axis_dummies(frame, axis='minor', transform=None):
Column names taken from chosen axis
"""
numbers = {
- 'major' : 0,
- 'minor' : 1
+ 'major': 0,
+ 'minor': 1
}
num = numbers.get(axis, axis)
@@ -722,6 +729,7 @@ def make_axis_dummies(frame, axis='minor', transform=None):
return DataFrame(values, columns=items, index=frame.index)
+
def block2d_to_block3d(values, items, shape, major_labels, minor_labels,
ref_items=None):
"""
diff --git a/pandas/core/series.py b/pandas/core/series.py
index eca177c4c543b..7a7fc7159ecb4 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -44,6 +44,7 @@
#----------------------------------------------------------------------
# Wrapper function for Series arithmetic methods
+
def _arith_method(op, name):
"""
Wrapper function for Series arithmetic operations, to avoid
@@ -124,7 +125,7 @@ def wrapper(self, other):
name = _maybe_match_name(self, other)
return Series(na_op(self.values, other.values),
index=self.index, name=name)
- elif isinstance(other, DataFrame): # pragma: no cover
+ elif isinstance(other, DataFrame): # pragma: no cover
return NotImplemented
elif isinstance(other, np.ndarray):
return Series(na_op(self.values, np.asarray(other)),
@@ -160,8 +161,8 @@ def na_op(x, y):
if isinstance(y, np.ndarray):
if (x.dtype == np.bool_ and
- y.dtype == np.bool_): # pragma: no cover
- result = op(x, y) # when would this be hit?
+ y.dtype == np.bool_): # pragma: no cover
+ result = op(x, y) # when would this be hit?
else:
x = com._ensure_object(x)
y = com._ensure_object(y)
@@ -187,7 +188,6 @@ def wrapper(self, other):
return wrapper
-
def _radd_compat(left, right):
radd = lambda x, y: y + x
# GH #353, NumPy 1.5.1 workaround
@@ -196,7 +196,7 @@ def _radd_compat(left, right):
except TypeError:
cond = (_np_version_under1p6 and
left.dtype == np.object_)
- if cond: # pragma: no cover
+ if cond: # pragma: no cover
output = np.empty_like(left)
output.flat[:] = [radd(x, right) for x in left.flat]
else:
@@ -239,6 +239,7 @@ def f(self, other, level=None, fill_value=None):
f.__name__ = name
return f
+
def _unbox(func):
@Appender(func.__doc__)
def f(self, *args, **kwargs):
@@ -288,9 +289,10 @@ def f(self, axis=0, dtype=None, out=None, skipna=True, level=None):
#----------------------------------------------------------------------
# Series class
+
class Series(np.ndarray, generic.PandasObject):
_AXIS_NUMBERS = {
- 'index' : 0
+ 'index': 0
}
_AXIS_NAMES = dict((v, k) for k, v in _AXIS_NUMBERS.iteritems())
@@ -322,7 +324,8 @@ def __new__(cls, data=None, index=None, dtype=None, name=None,
elif isinstance(index, PeriodIndex):
data = [data.get(i, nan) for i in index]
else:
- data = lib.fast_multiget(data, index.values, default=np.nan)
+ data = lib.fast_multiget(data, index.values,
+ default=np.nan)
except TypeError:
data = [data.get(i, nan) for i in index]
elif isinstance(data, types.GeneratorType):
@@ -830,7 +833,7 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False):
if name is None:
df = DataFrame(self)
else:
- df = DataFrame({name : self})
+ df = DataFrame({name: self})
return df.reset_index(level=level, drop=drop)
@@ -1153,7 +1156,7 @@ def max(self, axis=None, out=None, skipna=True, level=None):
@Substitution(name='standard deviation', shortname='stdev',
na_action=_doc_exclude_na, extras='')
- @Appender(_stat_doc +
+ @Appender(_stat_doc +
"""
Normalized by N-1 (unbiased estimator).
""")
@@ -1166,7 +1169,7 @@ def std(self, axis=None, dtype=None, out=None, ddof=1, skipna=True,
@Substitution(name='variance', shortname='var',
na_action=_doc_exclude_na, extras='')
- @Appender(_stat_doc +
+ @Appender(_stat_doc +
"""
Normalized by N-1 (unbiased estimator).
""")
@@ -1432,7 +1435,7 @@ def describe(self, percentile_width=50):
lib.Timestamp(top), freq]
else:
- lb = .5 * (1. - percentile_width/100.)
+ lb = .5 * (1. - percentile_width / 100.)
ub = 1. - lb
def pretty_name(x):
@@ -1567,7 +1570,7 @@ def clip_lower(self, threshold):
"""
return np.where(self < threshold, threshold, self)
-#-------------------------------------------------------------------------------
+#------------------------------------------------------------------------------
# Combination
def append(self, to_append, verify_integrity=False):
@@ -1993,6 +1996,7 @@ def map(self, arg, na_action=None):
if na_action == 'ignore':
mask = isnull(values)
+
def map_f(values, f):
return lib.map_infer_mask(values, f, mask.view(np.uint8))
else:
@@ -2245,7 +2249,6 @@ def fillna(self, value=None, method='pad', inplace=False,
return result
-
def replace(self, to_replace, value=None, method='pad', inplace=False,
limit=None):
"""
@@ -2282,15 +2285,15 @@ def replace(self, to_replace, value=None, method='pad', inplace=False,
"""
result = self.copy() if not inplace else self
- def _rep_one(s, to_rep, v): # replace single value
+ def _rep_one(s, to_rep, v): # replace single value
mask = com.mask_missing(s.values, to_rep)
np.putmask(s.values, mask, v)
return s
- def _rep_dict(rs, to_rep): # replace {[src] -> dest}
+ def _rep_dict(rs, to_rep): # replace {[src] -> dest}
all_src = set()
- dd = {} # group by unique destination value
+ dd = {} # group by unique destination value
for s, d in to_rep.iteritems():
dd.setdefault(d, []).append(s)
all_src.add(s)
@@ -2298,12 +2301,12 @@ def _rep_dict(rs, to_rep): # replace {[src] -> dest}
if any(d in all_src for d in dd.keys()):
# don't clobber each other at the cost of temporaries
masks = {}
- for d, sset in dd.iteritems(): # now replace by each dest
+ for d, sset in dd.iteritems(): # now replace by each dest
masks[d] = com.mask_missing(rs.values, sset)
for d, m in masks.iteritems():
np.putmask(rs.values, m, d)
- else: # if no risk of clobbering then simple
+ else: # if no risk of clobbering then simple
for d, sset in dd.iteritems():
_rep_one(rs, sset, d)
return rs
@@ -2316,17 +2319,17 @@ def _rep_dict(rs, to_rep): # replace {[src] -> dest}
if isinstance(to_replace, (list, np.ndarray)):
- if isinstance(value, (list, np.ndarray)): # check same length
+ if isinstance(value, (list, np.ndarray)): # check same length
vl, rl = len(value), len(to_replace)
if vl == rl:
return _rep_dict(result, dict(zip(to_replace, value)))
raise ValueError('Got %d to replace but %d values' % (rl, vl))
- elif value is not None: # otherwise all replaced with same value
+ elif value is not None: # otherwise all replaced with same value
return _rep_one(result, to_replace, value)
- else: # method
+ else: # method
if method is None: # pragma: no cover
raise ValueError('must specify a fill method')
fill_f = _get_fill_func(method)
@@ -2339,7 +2342,6 @@ def _rep_dict(rs, to_rep): # replace {[src] -> dest}
name=self.name)
return result
-
raise ValueError('Unrecognized to_replace type %s' %
type(to_replace))
@@ -2746,9 +2748,10 @@ def str(self):
_INDEX_TYPES = ndarray, Index, list, tuple
-#-------------------------------------------------------------------------------
+#------------------------------------------------------------------------------
# Supplementary functions
+
def remove_na(arr):
"""
Return array containing only true/non-NaN values, possibly empty.
@@ -2769,7 +2772,7 @@ def _try_cast(arr):
except (ValueError, TypeError):
if dtype is not None and raise_cast_failure:
raise
- else: # pragma: no cover
+ else: # pragma: no cover
subarr = np.array(data, dtype=object, copy=copy)
return subarr
@@ -2801,7 +2804,7 @@ def _try_cast(arr):
try:
subarr = _try_cast(data)
except Exception:
- if raise_cast_failure: # pragma: no cover
+ if raise_cast_failure: # pragma: no cover
raise
subarr = np.array(data, dtype=object, copy=copy)
subarr = lib.maybe_convert_objects(subarr)
@@ -2846,6 +2849,7 @@ def _try_cast(arr):
return subarr
+
def _dtype_from_scalar(val):
if isinstance(val, np.datetime64):
# ugly hacklet
@@ -2853,6 +2857,7 @@ def _dtype_from_scalar(val):
return val, np.dtype('M8[ns]')
return val, type(val)
+
def _get_rename_function(mapper):
if isinstance(mapper, (dict, Series)):
def f(x):
@@ -2865,9 +2870,8 @@ def f(x):
return f
-def _resolve_offset(freq, kwds):
- from pandas.core.datetools import getOffset
+def _resolve_offset(freq, kwds):
if 'timeRule' in kwds or 'offset' in kwds:
offset = kwds.get('offset', None)
offset = kwds.get('timeRule', offset)
@@ -2886,6 +2890,7 @@ def _resolve_offset(freq, kwds):
return offset
+
def _get_fill_func(method):
method = com._clean_fill_method(method)
if method == 'pad':
@@ -2904,6 +2909,7 @@ def _get_fill_func(method):
# Put here, otherwise monkey-patching in methods fails
+
class TimeSeries(Series):
def _repr_footer(self):
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 3172c5a395548..cdbeffbbafdd1 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -9,6 +9,7 @@
import pandas.core.common as com
import operator
+
class repeat(object):
def __init__(self, obj):
self.obj = obj
@@ -16,6 +17,7 @@ def __init__(self, obj):
def __getitem__(self, i):
return self.obj
+
class azip(object):
def __init__(self, *args):
self.cols = []
@@ -328,6 +330,7 @@ def f(x):
return _na_map(f, arr)
+
def str_repeat(arr, repeats):
"""
Duplicate each string in the array by indicated number of times
@@ -358,6 +361,7 @@ def rep(x, r):
result = lib.vec_binop(arr, repeats, rep)
return result
+
def str_match(arr, pat, flags=0):
"""
Find groups in each string (from beginning) using passed regular expression
@@ -374,6 +378,7 @@ def str_match(arr, pat, flags=0):
matches : array
"""
regex = re.compile(pat, flags=flags)
+
def f(x):
m = regex.match(x)
if m:
@@ -384,7 +389,6 @@ def f(x):
return _na_map(f, arr)
-
def str_join(arr, sep):
"""
Join lists contained as elements in array, a la str.join
@@ -412,7 +416,6 @@ def str_len(arr):
return _na_map(len, arr)
-
def str_findall(arr, pat, flags=0):
"""
Find all occurrences of pattern or regular expression
@@ -582,6 +585,7 @@ def str_wrap(arr, width=80):
"""
raise NotImplementedError
+
def str_get(arr, i):
"""
Extract element from lists, tuples, or strings in each element in the array
@@ -598,6 +602,7 @@ def str_get(arr, i):
f = lambda x: x[i]
return _na_map(f, arr)
+
def str_decode(arr, encoding):
"""
Decode character string to unicode using indicated encoding
@@ -613,6 +618,7 @@ def str_decode(arr, encoding):
f = lambda x: x.decode(encoding)
return _na_map(f, arr)
+
def str_encode(arr, encoding):
"""
Encode character string to unicode using indicated encoding
@@ -628,6 +634,7 @@ def str_encode(arr, encoding):
f = lambda x: x.encode(encoding)
return _na_map(f, arr)
+
def _noarg_wrapper(f):
def wrapper(self):
result = f(self.series)
@@ -661,6 +668,7 @@ def wrapper3(self, pat, na=np.nan):
return wrapper
+
def copy(source):
"Copy a docstring from another source function (if present)"
def do_copy(target):
diff --git a/pandas/io/data.py b/pandas/io/data.py
index 8753d1dabfba2..e4c1ae9a9817a 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -82,13 +82,14 @@ def get_quote_yahoo(symbols):
if not isinstance(symbols, list):
raise TypeError, "symbols must be a list"
# for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
- codes = {'symbol':'s','last':'l1','change_pct':'p2','PE':'r','time':'t1','short_ratio':'s7'}
- request = str.join('',codes.values()) # code request string
+ codes = {'symbol': 's', 'last': 'l1', 'change_pct': 'p2', 'PE': 'r',
+ 'time': 't1', 'short_ratio': 's7'}
+ request = str.join('',codes.values()) # code request string
header = codes.keys()
data = dict(zip(codes.keys(), [[] for i in range(len(codes))]))
- urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (str.join('+',symbols), request)
+ urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (str.join('+', symbols), request)
try:
lines = urllib2.urlopen(urlStr).readlines()
@@ -178,8 +179,8 @@ def get_data_fred(name=None, start=dt.datetime(2010, 1, 1),
url = fred_URL + '%s' % name + \
'/downloaddata/%s' % name + '.csv'
- data = read_csv(urllib.urlopen(url), index_col=0, parse_dates=True, header=None,
- skiprows=1, names=["DATE", name])
+ data = read_csv(urllib.urlopen(url), index_col=0, parse_dates=True,
+ header=None, skiprows=1, names=["DATE", name])
return data.truncate(start, end)
@@ -197,10 +198,10 @@ def get_data_famafrench(name, start=None, end=None):
datasets = {}
for i in range(len(file_edges) - 1):
- dataset = [d.split() for d in data[(file_edges[i] + 1):file_edges[i+1]]]
+ dataset = [d.split() for d in data[(file_edges[i] + 1):file_edges[i + 1]]]
if(len(dataset) > 10):
ncol = np.median(np.array([len(d) for d in dataset]))
- header_index = np.where(np.array([len(d) for d in dataset]) == (ncol-1))[0][-1]
+ header_index = np.where(np.array([len(d) for d in dataset]) == (ncol - 1))[0][-1]
header = dataset[header_index]
# to ensure the header is unique
header = [str(j + 1) + " " + header[j] for j in range(len(header))]
diff --git a/pandas/io/date_converters.py b/pandas/io/date_converters.py
index b9325b97b30ce..ce670eec7032f 100644
--- a/pandas/io/date_converters.py
+++ b/pandas/io/date_converters.py
@@ -2,17 +2,20 @@
import numpy as np
import pandas.lib as lib
+
def parse_date_time(date_col, time_col):
date_col = _maybe_cast(date_col)
time_col = _maybe_cast(time_col)
return lib.try_parse_date_and_time(date_col, time_col)
+
def parse_date_fields(year_col, month_col, day_col):
year_col = _maybe_cast(year_col)
month_col = _maybe_cast(month_col)
day_col = _maybe_cast(day_col)
return lib.try_parse_year_month_day(year_col, month_col, day_col)
+
def parse_all_fields(year_col, month_col, day_col, hour_col, minute_col,
second_col):
year_col = _maybe_cast(year_col)
@@ -24,6 +27,7 @@ def parse_all_fields(year_col, month_col, day_col, hour_col, minute_col,
return lib.try_parse_datetime_components(year_col, month_col, day_col,
hour_col, minute_col, second_col)
+
def generic_parser(parse_func, *cols):
N = _check_columns(cols)
results = np.empty(N, dtype=object)
@@ -34,11 +38,13 @@ def generic_parser(parse_func, *cols):
return results
+
def _maybe_cast(arr):
if not arr.dtype.type == np.object_:
arr = np.array(arr, dtype=object)
return arr
+
def _check_columns(cols):
assert(len(cols) > 0)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index db8c4a132d25b..76002917e8e47 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -26,6 +26,7 @@ def next(x):
from pandas.util.decorators import Appender
+
class DateConversionError(Exception):
pass
@@ -146,11 +147,12 @@ def _is_url(url):
Very naive check to see if url is an http(s), ftp, or file location.
"""
parsed_url = urlparse(url)
- if parsed_url.scheme in ['http','file', 'ftp', 'https']:
+ if parsed_url.scheme in ['http', 'file', 'ftp', 'https']:
return True
else:
return False
+
def _read(cls, filepath_or_buffer, kwds):
"Generic reader of line files."
encoding = kwds.get('encoding', None)
@@ -176,7 +178,7 @@ def _read(cls, filepath_or_buffer, kwds):
try:
# universal newline mode
f = com._get_handle(filepath_or_buffer, 'U', encoding=encoding)
- except Exception: # pragma: no cover
+ except Exception: # pragma: no cover
f = com._get_handle(filepath_or_buffer, 'r', encoding=encoding)
if kwds.get('date_parser', None) is not None:
@@ -199,6 +201,7 @@ def _read(cls, filepath_or_buffer, kwds):
return parser.get_chunk()
+
@Appender(_read_csv_doc)
def read_csv(filepath_or_buffer,
sep=',',
@@ -249,6 +252,7 @@ def read_csv(filepath_or_buffer,
return _read(TextParser, filepath_or_buffer, kdict)
+
@Appender(_read_table_doc)
def read_table(filepath_or_buffer,
sep='\t',
@@ -299,6 +303,7 @@ def read_table(filepath_or_buffer,
return _read(TextParser, filepath_or_buffer, kdict)
+
@Appender(_read_fwf_doc)
def read_fwf(filepath_or_buffer,
colspecs=None,
@@ -353,13 +358,14 @@ def read_fwf(filepath_or_buffer,
if widths is not None:
colspecs, col = [], 0
for w in widths:
- colspecs.append( (col, col+w) )
+ colspecs.append((col, col+w))
col += w
kdict['colspecs'] = colspecs
kdict['thousands'] = thousands
return _read(FixedWidthFieldParser, filepath_or_buffer, kdict)
+
def read_clipboard(**kwargs): # pragma: no cover
"""
Read text from clipboard and pass to read_table. See read_table for the
@@ -373,7 +379,8 @@ def read_clipboard(**kwargs): # pragma: no cover
text = clipboard_get()
return read_table(StringIO(text), **kwargs)
-def to_clipboard(obj): # pragma: no cover
+
+def to_clipboard(obj): # pragma: no cover
"""
Attempt to write text representation of object to the system clipboard
@@ -387,6 +394,7 @@ def to_clipboard(obj): # pragma: no cover
from pandas.util.clipboard import clipboard_set
clipboard_set(str(obj))
+
class BufferedReader(object):
"""
For handling different kinds of files, e.g. zip files where reading out a
@@ -394,7 +402,8 @@ class BufferedReader(object):
"""
def __init__(self, fh, delimiter=','):
- pass # pragma: no coverage
+ pass # pragma: no coverage
+
class BufferedCSVReader(BufferedReader):
pass
@@ -817,7 +826,7 @@ def get_chunk(self, rows=None):
self._first_chunk = False
columns = list(self.orig_columns)
- if len(content) == 0: # pragma: no cover
+ if len(content) == 0: # pragma: no cover
if self.index_col is not None:
if np.isscalar(self.index_col):
index = Index([], name=self.index_name)
@@ -903,7 +912,7 @@ def ix(col):
index = data.pop(i)
if not self._implicit_index:
columns.pop(i)
- else: # given a list of index
+ else: # given a list of index
to_remove = []
index = []
for idx in self.index_col:
@@ -938,7 +947,7 @@ def _get_name(icol):
name = _get_name(self.index_col)
index = data.pop(name)
col_names.remove(name)
- else: # given a list of index
+ else: # given a list of index
to_remove = []
index = []
for idx in self.index_col:
@@ -1085,7 +1094,7 @@ def _get_lines(self, rows=None):
lines.extend(source[self.pos:])
self.pos = len(source)
else:
- lines.extend(source[self.pos:self.pos+rows])
+ lines.extend(source[self.pos:self.pos + rows])
self.pos += rows
else:
new_rows = []
@@ -1121,6 +1130,7 @@ def _get_lines(self, rows=None):
lines = self._check_comments(lines)
return self._check_thousands(lines)
+
def _get_na_values(col, na_values):
if isinstance(na_values, dict):
if col in na_values:
@@ -1130,6 +1140,7 @@ def _get_na_values(col, na_values):
else:
return na_values
+
def _convert_to_ndarrays(dct, na_values, verbose=False):
result = {}
for c, values in dct.iteritems():
@@ -1140,6 +1151,7 @@ def _convert_to_ndarrays(dct, na_values, verbose=False):
print 'Filled %d NA values in column %s' % (na_count, str(c))
return result
+
def _convert_types(values, na_values):
na_count = 0
if issubclass(values.dtype.type, (np.number, np.bool_)):
@@ -1162,6 +1174,7 @@ def _convert_types(values, na_values):
return result, na_count
+
def _try_convert_dates(parser, colspec, data_dict, columns):
colspec = _get_col_names(colspec, columns)
new_name = '_'.join([str(x) for x in colspec])
@@ -1173,6 +1186,7 @@ def _try_convert_dates(parser, colspec, data_dict, columns):
new_col = parser(_concat_date_cols(to_parse))
return new_name, new_col, colspec
+
def _get_col_names(colspec, columns):
colset = set(columns)
colnames = []
@@ -1201,7 +1215,7 @@ class FixedWidthReader(object):
def __init__(self, f, colspecs, filler, thousands=None):
self.f = f
self.colspecs = colspecs
- self.filler = filler # Empty characters between fields.
+ self.filler = filler # Empty characters between fields.
self.thousands = thousands
assert isinstance(colspecs, (tuple, list))
@@ -1323,8 +1337,8 @@ def parse(self, sheetname, header=0, skiprows=None, skip_footer=0,
if skipfooter is not None:
skip_footer = skipfooter
- choose = {True:self._parse_xlsx,
- False:self._parse_xls}
+ choose = {True: self._parse_xlsx,
+ False: self._parse_xls}
return choose[self.use_xlsx](sheetname, header=header,
skiprows=skiprows, index_col=index_col,
parse_cols=parse_cols,
@@ -1399,7 +1413,7 @@ def _parse_xls(self, sheetname, header=0, skiprows=None,
if typ == XL_CELL_DATE:
dt = xldate_as_tuple(value, datemode)
# how to produce this first case?
- if dt[0] < MINYEAR: # pragma: no cover
+ if dt[0] < MINYEAR: # pragma: no cover
value = time(*dt[3:])
else:
value = datetime(*dt)
@@ -1436,6 +1450,7 @@ def _trim_excel_header(row):
row = row[1:]
return row
+
class ExcelWriter(object):
"""
Class for writing DataFrame objects into excel sheets, uses xlwt for xls,
@@ -1456,7 +1471,7 @@ def __init__(self, path):
self.fm_date = xlwt.easyxf(num_format_str='YYYY-MM-DD')
else:
from openpyxl.workbook import Workbook
- self.book = Workbook(optimized_write = True)
+ self.book = Workbook(optimized_write=True)
self.path = path
self.sheets = {}
self.cur_sheet = None
@@ -1498,15 +1513,15 @@ def _writerow_xls(self, row, sheet_name):
for i, val in enumerate(row):
if isinstance(val, (datetime.datetime, datetime.date)):
if isinstance(val, datetime.datetime):
- sheetrow.write(i,val, self.fm_datetime)
+ sheetrow.write(i, val, self.fm_datetime)
else:
- sheetrow.write(i,val, self.fm_date)
+ sheetrow.write(i, val, self.fm_date)
elif isinstance(val, np.int64):
- sheetrow.write(i,int(val))
+ sheetrow.write(i, int(val))
elif isinstance(val, np.bool8):
- sheetrow.write(i,bool(val))
+ sheetrow.write(i, bool(val))
else:
- sheetrow.write(i,val)
+ sheetrow.write(i, val)
row_idx += 1
if row_idx == 1000:
sheet.flush_row_data()
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index c82dab08a3a1c..af480b5a6457f 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -29,44 +29,45 @@
# reading and writing the full object in one go
_TYPE_MAP = {
- Series : 'series',
- SparseSeries : 'sparse_series',
- TimeSeries : 'series',
- DataFrame : 'frame',
- SparseDataFrame : 'sparse_frame',
- Panel : 'wide',
- SparsePanel : 'sparse_panel'
+ Series: 'series',
+ SparseSeries: 'sparse_series',
+ TimeSeries: 'series',
+ DataFrame: 'frame',
+ SparseDataFrame: 'sparse_frame',
+ Panel: 'wide',
+ SparsePanel: 'sparse_panel'
}
_NAME_MAP = {
- 'series' : 'Series',
- 'time_series' : 'TimeSeries',
- 'sparse_series' : 'SparseSeries',
- 'frame' : 'DataFrame',
- 'sparse_frame' : 'SparseDataFrame',
- 'frame_table' : 'DataFrame (Table)',
- 'wide' : 'Panel',
- 'sparse_panel' : 'SparsePanel',
- 'wide_table' : 'Panel (Table)',
- 'long' : 'LongPanel',
+ 'series': 'Series',
+ 'time_series': 'TimeSeries',
+ 'sparse_series': 'SparseSeries',
+ 'frame': 'DataFrame',
+ 'sparse_frame': 'SparseDataFrame',
+ 'frame_table': 'DataFrame (Table)',
+ 'wide': 'Panel',
+ 'sparse_panel': 'SparsePanel',
+ 'wide_table': 'Panel (Table)',
+ 'long': 'LongPanel',
# legacy h5 files
- 'Series' : 'Series',
- 'TimeSeries' : 'TimeSeries',
- 'DataFrame' : 'DataFrame',
- 'DataMatrix' : 'DataMatrix'
+ 'Series': 'Series',
+ 'TimeSeries': 'TimeSeries',
+ 'DataFrame': 'DataFrame',
+ 'DataMatrix': 'DataMatrix'
}
# legacy handlers
_LEGACY_MAP = {
- 'Series' : 'legacy_series',
- 'TimeSeries' : 'legacy_series',
- 'DataFrame' : 'legacy_frame',
- 'DataMatrix' : 'legacy_frame',
- 'WidePanel' : 'wide_table',
+ 'Series': 'legacy_series',
+ 'TimeSeries': 'legacy_series',
+ 'DataFrame': 'legacy_frame',
+ 'DataMatrix': 'legacy_frame',
+ 'WidePanel': 'wide_table',
}
# oh the troubles to reduce import time
_table_mod = None
+
def _tables():
global _table_mod
if _table_mod is None:
@@ -74,6 +75,7 @@ def _tables():
_table_mod = tables
return _table_mod
+
@contextmanager
def get_store(path, mode='a', complevel=None, complib=None,
fletcher32=False):
@@ -120,6 +122,7 @@ def get_store(path, mode='a', complevel=None, complib=None,
if store is not None:
store.close()
+
class HDFStore(object):
"""
dict-like IO interface for storing pandas objects in PyTables
@@ -167,7 +170,7 @@ def __init__(self, path, mode='a', complevel=None, complib=None,
fletcher32=False):
try:
import tables as _
- except ImportError: # pragma: no cover
+ except ImportError: # pragma: no cover
raise Exception('HDFStore requires PyTables')
self.path = path
@@ -226,7 +229,7 @@ def open(self, mode='a', warn=True):
See HDFStore docstring or tables.openFile for info about modes
"""
self.mode = mode
- if warn and mode == 'w': # pragma: no cover
+ if warn and mode == 'w': # pragma: no cover
while True:
response = raw_input("Re-opening as mode='w' will delete the "
"current file. Continue (y/n)?")
@@ -328,8 +331,8 @@ def put(self, key, value, table=False, append=False,
value : {Series, DataFrame, Panel}
table : boolean, default False
Write as a PyTables Table structure which may perform worse but
- allow more flexible operations like searching / selecting subsets of
- the data
+ allow more flexible operations like searching / selecting subsets
+ of the data
append : boolean, default False
For table data structures, append the input data to the existing
table
@@ -342,7 +345,7 @@ def put(self, key, value, table=False, append=False,
comp=compression)
def _get_handler(self, op, kind):
- return getattr(self,'_%s_%s' % (op, kind))
+ return getattr(self, '_%s_%s' % (op, kind))
def remove(self, key, where=None):
"""
@@ -666,7 +669,8 @@ def _read_index_node(self, node):
if 'name' in node._v_attrs:
name = node._v_attrs.name
- index_class = _alias_to_class(getattr(node._v_attrs, 'index_class', ''))
+ index_class = _alias_to_class(getattr(node._v_attrs,
+ 'index_class', ''))
factory = _get_index_factory(index_class)
kwargs = {}
@@ -714,7 +718,6 @@ def _write_array(self, group, key, value):
getattr(group, key)._v_attrs.transposed = transposed
return
-
if value.dtype.type == np.object_:
vlarr = self.handle.createVLArray(group, key,
_tables().ObjectAtom())
@@ -749,12 +752,12 @@ def _write_table(self, group, items=None, index=None, columns=None,
if 'table' not in group:
# create the table
- desc = {'index' : index_t,
- 'column' : col_t,
- 'values' : _tables().FloatCol(shape=(len(values)))}
+ desc = {'index': index_t,
+ 'column': col_t,
+ 'values': _tables().FloatCol(shape=(len(values)))}
- options = {'name' : 'table',
- 'description' : desc}
+ options = {'name': 'table',
+ 'description': desc}
if compression:
complevel = self.complevel
@@ -783,7 +786,7 @@ def _write_table(self, group, items=None, index=None, columns=None,
table._v_attrs.index_kind = index_kind
table._v_attrs.columns_kind = cols_kind
if append:
- existing_fields = getattr(table._v_attrs,'fields',None)
+ existing_fields = getattr(table._v_attrs, 'fields', None)
if (existing_fields is not None and
existing_fields != list(items)):
raise Exception("appended items do not match existing items"
@@ -809,7 +812,7 @@ def _write_table(self, group, items=None, index=None, columns=None,
row['values'] = v
row.append()
self.handle.flush()
- except (ValueError), detail: # pragma: no cover
+ except (ValueError), detail: # pragma: no cover
print "value_error in _write_table -> %s" % str(detail)
try:
self.handle.flush()
@@ -918,6 +921,7 @@ def _read_panel_table(self, group, where=None):
wp = wp.reindex(minor=new_minor)
return wp
+
def _delete_from_table(self, group, where = None):
table = getattr(group, 'table')
@@ -933,6 +937,7 @@ def _delete_from_table(self, group, where = None):
self.handle.flush()
return len(s.values)
+
def _convert_index(index):
if isinstance(index, DatetimeIndex):
converted = index.asi8
@@ -960,7 +965,7 @@ def _convert_index(index):
converted = np.array([time.mktime(v.timetuple()) for v in values],
dtype=np.int32)
return converted, 'date', _tables().Time32Col()
- elif inferred_type =='string':
+ elif inferred_type == 'string':
converted = np.array(list(values), dtype=np.str_)
itemsize = converted.dtype.itemsize
return converted, 'string', _tables().StringCol(itemsize)
@@ -974,10 +979,11 @@ def _convert_index(index):
elif inferred_type == 'floating':
atom = _tables().Float64Col()
return np.asarray(values, dtype=np.float64), 'float', atom
- else: # pragma: no cover
+ else: # pragma: no cover
atom = _tables().ObjectAtom()
return np.asarray(values, dtype='O'), 'object', atom
+
def _read_array(group, key):
import tables
node = getattr(group, key)
@@ -1006,6 +1012,7 @@ def _read_array(group, key):
else:
return ret
+
def _unconvert_index(data, kind):
if kind == 'datetime64':
index = DatetimeIndex(data)
@@ -1018,19 +1025,21 @@ def _unconvert_index(data, kind):
index = np.array(data)
elif kind == 'object':
index = np.array(data[0])
- else: # pragma: no cover
+ else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
return index
+
def _unconvert_index_legacy(data, kind, legacy=False):
if kind == 'datetime':
index = lib.time64_to_datetime(data)
elif kind in ('string', 'integer'):
index = np.array(data, dtype=object)
- else: # pragma: no cover
+ else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
return index
+
def _maybe_convert(values, val_kind):
if _need_convert(val_kind):
conv = _get_converter(val_kind)
@@ -1038,19 +1047,22 @@ def _maybe_convert(values, val_kind):
values = conv(values)
return values
+
def _get_converter(kind):
if kind == 'datetime64':
return lambda x: np.array(x, dtype='M8[ns]')
if kind == 'datetime':
return lib.convert_timestamps
- else: # pragma: no cover
+ else: # pragma: no cover
raise ValueError('invalid kind %s' % kind)
+
def _need_convert(kind):
if kind in ('datetime', 'datetime64'):
return True
return False
+
def _is_table_type(group):
try:
return 'table' in group._v_attrs.pandas_type
@@ -1058,21 +1070,24 @@ def _is_table_type(group):
# new node, e.g.
return False
-_index_type_map = {DatetimeIndex : 'datetime',
- PeriodIndex : 'period'}
+_index_type_map = {DatetimeIndex: 'datetime',
+ PeriodIndex: 'period'}
_reverse_index_map = {}
for k, v in _index_type_map.iteritems():
_reverse_index_map[v] = k
+
def _class_to_alias(cls):
return _index_type_map.get(cls, '')
+
def _alias_to_class(alias):
- if isinstance(alias, type): # pragma: no cover
- return alias # compat: for a short period of time master stored types
+ if isinstance(alias, type): # pragma: no cover
+ return alias # compat: for a short period of time master stored types
return _reverse_index_map.get(alias, Index)
+
class Selection(object):
"""
Carries out a selection operation on a tables.Table object.
@@ -1109,18 +1124,18 @@ def __init__(self, table, where=None, index_kind=None):
def generate(self, where):
# and condictions
for c in where:
- op = c.get('op',None)
+ op = c.get('op', None)
value = c['value']
field = c['field']
if field == 'index' and self.index_kind == 'datetime64':
val = lib.Timestamp(value).value
- self.conditions.append('(%s %s %s)' % (field,op,val))
+ self.conditions.append('(%s %s %s)' % (field, op, val))
elif field == 'index' and isinstance(value, datetime):
value = time.mktime(value.timetuple())
- self.conditions.append('(%s %s %s)' % (field,op,value))
+ self.conditions.append('(%s %s %s)' % (field, op, value))
else:
- self.generate_multiple_conditions(op,value,field)
+ self.generate_multiple_conditions(op, value, field)
if len(self.conditions):
self.the_condition = '(' + ' & '.join(self.conditions) + ')'
@@ -1129,15 +1144,15 @@ def generate_multiple_conditions(self, op, value, field):
if op and op == 'in' or isinstance(value, (list, np.ndarray)):
if len(value) <= 61:
- l = '(' + ' | '.join([ "(%s == '%s')" % (field,v)
- for v in value ]) + ')'
+ l = '(' + ' | '.join([ "(%s == '%s')" % (field, v)
+ for v in value]) + ')'
self.conditions.append(l)
else:
self.column_filter = set(value)
else:
if op is None:
op = '=='
- self.conditions.append('(%s %s "%s")' % (field,op,value))
+ self.conditions.append('(%s %s "%s")' % (field, op, value))
def select(self):
"""
@@ -1155,6 +1170,7 @@ def select_coords(self):
"""
self.values = self.table.getWhereList(self.the_condition)
+
def _get_index_factory(klass):
if klass == DatetimeIndex:
def f(values, freq=None, tz=None):
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 6d1628fd8b21f..021f80c065e75 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -10,9 +10,10 @@
from pandas.core.datetools import format as date_format
from pandas.core.api import DataFrame, isnull
-#-------------------------------------------------------------------------------
+#------------------------------------------------------------------------------
# Helper execution function
+
def execute(sql, con, retry=True, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
@@ -44,17 +45,19 @@ def execute(sql, con, retry=True, cur=None, params=None):
print 'Error on sql %s' % sql
raise
+
def _safe_fetch(cur):
try:
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
- except Exception, e: # pragma: no cover
+ except Exception, e: # pragma: no cover
excName = e.__class__.__name__
if excName == 'OperationalError':
return []
+
def tquery(sql, con=None, cur=None, retry=True):
"""
Returns list of tuples corresponding to each row in given sql
@@ -98,6 +101,7 @@ def tquery(sql, con=None, cur=None, retry=True):
return result
+
def uquery(sql, con=None, cur=None, retry=True, params=()):
"""
Does the same thing as tquery, but instead of returning results, it
@@ -119,6 +123,7 @@ def uquery(sql, con=None, cur=None, retry=True, params=()):
return uquery(sql, con, retry=False)
return result
+
def read_frame(sql, con, index_col=None, coerce_float=True):
"""
Returns a DataFrame corresponding to the result set of the query
@@ -152,6 +157,7 @@ def read_frame(sql, con, index_col=None, coerce_float=True):
frame_query = read_frame
+
def write_frame(frame, name=None, con=None, flavor='sqlite', append=False):
"""
Write records stored in a DataFrame to SQLite. The index will currently be
@@ -170,11 +176,13 @@ def write_frame(frame, name=None, con=None, flavor='sqlite', append=False):
data = [tuple(x) for x in frame.values]
con.executemany(insert_sql, data)
+
def has_table(name, con):
sqlstr = "SELECT name FROM sqlite_master WHERE type='table' AND name='%s'" % name
rs = tquery(sqlstr, con)
return len(rs) > 0
+
def get_sqlite_schema(frame, name, dtypes=None, keys=None):
template = """
CREATE TABLE %(name)s (
@@ -206,26 +214,25 @@ def get_sqlite_schema(frame, name, dtypes=None, keys=None):
if isinstance(keys, basestring):
keys = (keys,)
keystr = ', PRIMARY KEY (%s)' % ','.join(keys)
- return template % {'name' : name, 'columns' : columns, 'keystr' : keystr}
-
+ return template % {'name': name, 'columns': columns, 'keystr': keystr}
-
-#-------------------------------------------------------------------------------
+#------------------------------------------------------------------------------
# Query formatting
_formatters = {
- datetime : lambda dt: "'%s'" % date_format(dt),
- str : lambda x: "'%s'" % x,
- np.str_ : lambda x: "'%s'" % x,
- unicode : lambda x: "'%s'" % x,
- float : lambda x: "%.8f" % x,
- int : lambda x: "%s" % x,
- type(None) : lambda x: "NULL",
- np.float64 : lambda x: "%.10f" % x,
- bool : lambda x: "'%s'" % x,
+ datetime: lambda dt: "'%s'" % date_format(dt),
+ str: lambda x: "'%s'" % x,
+ np.str_: lambda x: "'%s'" % x,
+ unicode: lambda x: "'%s'" % x,
+ float: lambda x: "%.8f" % x,
+ int: lambda x: "%s" % x,
+ type(None): lambda x: "NULL",
+ np.float64: lambda x: "%.10f" % x,
+ bool: lambda x: "'%s'" % x,
}
+
def format_query(sql, *args):
"""
diff --git a/pandas/rpy/base.py b/pandas/rpy/base.py
index 070d457edd21d..0c80448684697 100644
--- a/pandas/rpy/base.py
+++ b/pandas/rpy/base.py
@@ -1,5 +1,6 @@
import pandas.rpy.util as util
+
class lm(object):
"""
Examples
@@ -10,4 +11,3 @@ class lm(object):
def __init__(self, formula, data):
pass
-
diff --git a/pandas/rpy/common.py b/pandas/rpy/common.py
index f81ec7ef369ae..481714b94386c 100644
--- a/pandas/rpy/common.py
+++ b/pandas/rpy/common.py
@@ -16,6 +16,7 @@
__all__ = ['convert_robj', 'load_data', 'convert_to_r_dataframe',
'convert_to_r_matrix']
+
def load_data(name, package=None, convert=True):
if package:
importr(package)
@@ -29,15 +30,18 @@ def load_data(name, package=None, convert=True):
else:
return robj
+
def _rclass(obj):
"""
Return R class name for input object
"""
return r['class'](obj)[0]
+
def _is_null(obj):
return _rclass(obj) == 'NULL'
+
def _convert_list(obj):
"""
Convert named Vector to dict
@@ -45,6 +49,7 @@ def _convert_list(obj):
values = [convert_robj(x) for x in obj]
return dict(zip(obj.names, values))
+
def _convert_array(obj):
"""
Convert Array to ndarray
@@ -59,7 +64,6 @@ def _convert_array(obj):
if len(dim) == 3:
arr = values.reshape(dim[-1:] + dim[:-1]).swapaxes(1, 2)
-
if obj.names is not None:
name_list = [list(x) for x in obj.names]
if len(dim) == 2:
@@ -73,6 +77,7 @@ def _convert_array(obj):
else:
return arr
+
def _convert_vector(obj):
if isinstance(obj, robj.IntVector):
return _convert_int_vector(obj)
@@ -83,6 +88,7 @@ def _convert_vector(obj):
NA_INTEGER = -2147483648
+
def _convert_int_vector(obj):
arr = np.asarray(obj)
mask = arr == NA_INTEGER
@@ -91,6 +97,7 @@ def _convert_int_vector(obj):
arr[mask] = np.nan
return arr
+
def _convert_str_vector(obj):
arr = np.asarray(obj, dtype=object)
mask = arr == robj.NA_Character
@@ -98,6 +105,7 @@ def _convert_str_vector(obj):
arr[mask] = np.nan
return arr
+
def _convert_DataFrame(rdf):
columns = list(rdf.colnames)
rows = np.array(rdf.rownames)
@@ -125,6 +133,7 @@ def _convert_DataFrame(rdf):
return pd.DataFrame(data, index=_check_int(rows), columns=columns)
+
def _convert_Matrix(mat):
columns = mat.colnames
rows = mat.rownames
@@ -135,6 +144,7 @@ def _convert_Matrix(mat):
return pd.DataFrame(np.array(mat), index=_check_int(index),
columns=columns)
+
def _check_int(vec):
try:
# R observation numbers come through as strings
@@ -145,8 +155,8 @@ def _check_int(vec):
return vec
_pandas_converters = [
- (robj.DataFrame , _convert_DataFrame),
- (robj.Matrix , _convert_Matrix),
+ (robj.DataFrame, _convert_DataFrame),
+ (robj.Matrix, _convert_Matrix),
(robj.StrVector, _convert_vector),
(robj.FloatVector, _convert_vector),
(robj.Array, _convert_array),
@@ -154,8 +164,8 @@ def _check_int(vec):
]
_converters = [
- (robj.DataFrame , lambda x: _convert_DataFrame(x).toRecords(index=False)),
- (robj.Matrix , lambda x: _convert_Matrix(x).toRecords(index=False)),
+ (robj.DataFrame, lambda x: _convert_DataFrame(x).toRecords(index=False)),
+ (robj.Matrix, lambda x: _convert_Matrix(x).toRecords(index=False)),
(robj.IntVector, _convert_vector),
(robj.StrVector, _convert_vector),
(robj.FloatVector, _convert_vector),
@@ -163,6 +173,7 @@ def _check_int(vec):
(robj.Vector, _convert_list),
]
+
def convert_robj(obj, use_pandas=True):
"""
Convert rpy2 object to a pandas-friendly form
@@ -206,6 +217,7 @@ def convert_robj(obj, use_pandas=True):
np.str: robj.NA_Character,
np.bool: robj.NA_Logical}
+
def convert_to_r_dataframe(df, strings_as_factors=False):
"""
Convert a pandas DataFrame to a R data.frame.
@@ -270,7 +282,6 @@ def convert_to_r_matrix(df, strings_as_factors=False):
raise TypeError("Conversion to matrix only possible with non-mixed "
"type DataFrames")
-
r_dataframe = convert_to_r_dataframe(df, strings_as_factors)
as_matrix = robj.baseenv.get("as.matrix")
r_matrix = as_matrix(r_dataframe)
@@ -282,18 +293,20 @@ def test_convert_list():
obj = r('list(a=1, b=2, c=3)')
converted = convert_robj(obj)
- expected = {'a' : [1], 'b' : [2], 'c' : [3]}
+ expected = {'a': [1], 'b': [2], 'c': [3]}
_test.assert_dict_equal(converted, expected)
+
def test_convert_nested_list():
obj = r('list(a=list(foo=1, bar=2))')
converted = convert_robj(obj)
- expected = {'a' : {'foo' : [1], 'bar' : [2]}}
+ expected = {'a': {'foo': [1], 'bar': [2]}}
_test.assert_dict_equal(converted, expected)
+
def test_convert_frame():
# built-in dataset
df = r['faithful']
@@ -303,6 +316,7 @@ def test_convert_frame():
assert np.array_equal(converted.columns, ['eruptions', 'waiting'])
assert np.array_equal(converted.index, np.arange(1, 273))
+
def _test_matrix():
r('mat <- matrix(rnorm(9), ncol=3)')
r('colnames(mat) <- c("one", "two", "three")')
@@ -310,6 +324,7 @@ def _test_matrix():
return r['mat']
+
def test_convert_matrix():
mat = _test_matrix()
@@ -318,6 +333,7 @@ def test_convert_matrix():
assert np.array_equal(converted.index, ['a', 'b', 'c'])
assert np.array_equal(converted.columns, ['one', 'two', 'three'])
+
def test_convert_r_dataframe():
is_na = robj.baseenv.get("is.na")
@@ -350,6 +366,7 @@ def test_convert_r_dataframe():
else:
assert original == converted
+
def test_convert_r_matrix():
is_na = robj.baseenv.get("is.na")
diff --git a/pandas/rpy/mass.py b/pandas/rpy/mass.py
index 1a663e5729b5f..12fbbdfa4dc98 100644
--- a/pandas/rpy/mass.py
+++ b/pandas/rpy/mass.py
@@ -1,4 +1,2 @@
-
class rlm(object):
pass
-
diff --git a/pandas/rpy/vars.py b/pandas/rpy/vars.py
index 3993423b338ee..4756b2779224c 100644
--- a/pandas/rpy/vars.py
+++ b/pandas/rpy/vars.py
@@ -1,5 +1,6 @@
import pandas.rpy.util as util
+
class VAR(object):
"""
@@ -17,4 +18,3 @@ class VAR(object):
def __init__(y, p=1, type="none", season=None, exogen=None,
lag_max=None, ic=None):
pass
-
diff --git a/pandas/sparse/array.py b/pandas/sparse/array.py
index f0f02d317f26a..c7e783dee910d 100644
--- a/pandas/sparse/array.py
+++ b/pandas/sparse/array.py
@@ -35,12 +35,13 @@ def wrapper(self, other):
return SparseArray(op(self.sp_values, other),
sparse_index=self.sp_index,
fill_value=new_fill_value)
- else: # pragma: no cover
+ else: # pragma: no cover
raise TypeError('operation with %s not supported' % type(other))
wrapper.__name__ = name
return wrapper
+
def _sparse_array_op(left, right, op, name):
if np.isnan(left.fill_value):
sparse_op = lambda a, b: _sparse_nanop(a, b, name)
@@ -61,6 +62,7 @@ def _sparse_array_op(left, right, op, name):
return SparseArray(result, sparse_index=result_index,
fill_value=fill_value)
+
def _sparse_nanop(this, other, name):
sparse_op = getattr(splib, 'sparse_nan%s' % name)
result, result_index = sparse_op(this.sp_values,
@@ -70,6 +72,7 @@ def _sparse_nanop(this, other, name):
return result, result_index
+
def _sparse_fillop(this, other, name):
sparse_op = getattr(splib, 'sparse_%s' % name)
result, result_index = sparse_op(this.sp_values,
@@ -399,6 +402,7 @@ def mean(self, axis=None, dtype=None, out=None):
nsparse = self.sp_index.ngaps
return (sp_sum + self.fill_value * nsparse) / (ct + nsparse)
+
def make_sparse(arr, kind='block', fill_value=nan):
"""
Convert ndarray to sparse format
@@ -428,7 +432,7 @@ def make_sparse(arr, kind='block', fill_value=nan):
index = BlockIndex(length, locs, lens)
elif kind == 'integer':
index = IntIndex(length, indices)
- else: # pragma: no cover
+ else: # pragma: no cover
raise ValueError('must be block or integer type')
sparsified_values = arr[mask]
diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py
index c26a37852ea42..0df726fcb40fd 100644
--- a/pandas/sparse/frame.py
+++ b/pandas/sparse/frame.py
@@ -1,6 +1,6 @@
"""
-Data structures for sparse float data. Life is made simpler by dealing only with
-float64 data
+Data structures for sparse float data. Life is made simpler by dealing only
+with float64 data
"""
# pylint: disable=E1101,E1103,W0231,E0202
@@ -42,6 +42,7 @@ def shape(self):
def axes(self):
return [self.sp_frame.columns, self.sp_frame.index]
+
class SparseDataFrame(DataFrame):
"""
DataFrame containing sparse floating point data in the form of SparseSeries
@@ -291,10 +292,11 @@ def _delete_column_index(self, loc):
new_columns = self.columns[:loc]
else:
new_columns = Index(np.concatenate((self.columns[:loc],
- self.columns[loc+1:])))
+ self.columns[loc + 1:])))
self.columns = new_columns
_index = None
+
def _set_index(self, index):
self._index = _ensure_index(index)
for v in self._series.values():
@@ -337,7 +339,7 @@ def __getitem__(self, key):
if com._is_bool_indexer(key):
key = np.asarray(key, dtype=bool)
return self._getitem_array(key)
- else: # pragma: no cover
+ else: # pragma: no cover
raise
@Appender(DataFrame.get_value.__doc__, indents=0)
@@ -575,7 +577,7 @@ def _rename_columns_inplace(self, mapper):
for col in self.columns:
new_col = mapper(col)
- if new_col in new_series: # pragma: no cover
+ if new_col in new_series: # pragma: no cover
raise Exception('Non-unique mapping!')
new_series[new_col] = self[col]
new_columns.append(new_col)
@@ -626,7 +628,7 @@ def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
def _join_index(self, other, how, lsuffix, rsuffix):
if isinstance(other, Series):
assert(other.name is not None)
- other = SparseDataFrame({other.name : other},
+ other = SparseDataFrame({other.name: other},
default_fill_value=self.default_fill_value)
join_index = self.index.join(other.index, how=how)
@@ -786,6 +788,7 @@ def fillna(self, value=None, method='pad', inplace=False, limit=None):
return self._constructor(new_series, index=self.index,
columns=self.columns)
+
def stack_sparse_frame(frame):
"""
Only makes sense when fill_value is NaN
diff --git a/pandas/sparse/list.py b/pandas/sparse/list.py
index 62c9d096d8dfa..9f59b9108a6b0 100644
--- a/pandas/sparse/list.py
+++ b/pandas/sparse/list.py
@@ -3,6 +3,7 @@
from pandas.sparse.array import SparseArray
import pandas._sparse as splib
+
class SparseList(object):
"""
Data structure for accumulating data to be converted into a
diff --git a/pandas/sparse/panel.py b/pandas/sparse/panel.py
index b843b653ab439..bd5a2785aba2b 100644
--- a/pandas/sparse/panel.py
+++ b/pandas/sparse/panel.py
@@ -1,6 +1,6 @@
"""
-Data structures for sparse float data. Life is made simpler by dealing only with
-float64 data
+Data structures for sparse float data. Life is made simpler by dealing only
+with float64 data
"""
# pylint: disable=E1101,E1103,W0231
@@ -15,6 +15,7 @@
import pandas.core.common as com
+
class SparsePanelAxis(object):
def __init__(self, cache_field, frame_attr):
@@ -97,7 +98,7 @@ def __init__(self, frames, items=None, major_axis=None, minor_axis=None,
self.major_axis = major_axis
self.minor_axis = minor_axis
- def _consolidate_inplace(self): # pragma: no cover
+ def _consolidate_inplace(self): # pragma: no cover
# do nothing when DataFrame calls this method
pass
@@ -135,6 +136,7 @@ def values(self):
# need a special property for items to make the field assignable
_items = None
+
def _get_items(self):
return self._items
@@ -262,7 +264,7 @@ def to_frame(self, filter_observations=True):
# values are stacked column-major
indexer = minor * N + major
- counts.put(indexer, counts.take(indexer) + 1) # cuteness
+ counts.put(indexer, counts.take(indexer) + 1) # cuteness
d_values[item] = values
d_indexer[item] = indexer
@@ -445,6 +447,7 @@ def minor_xs(self, key):
SparseWidePanel = SparsePanel
+
def _convert_frames(frames, index, columns, fill_value=np.nan, kind='block'):
from pandas.core.panel import _get_combined_index
output = {}
diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py
index dfe78a81c6a59..70d35607573c2 100644
--- a/pandas/sparse/series.py
+++ b/pandas/sparse/series.py
@@ -1,6 +1,6 @@
"""
-Data structures for sparse float data. Life is made simpler by dealing only with
-float64 data
+Data structures for sparse float data. Life is made simpler by dealing only
+with float64 data
"""
# pylint: disable=E1101,E1103,W0231
@@ -25,9 +25,10 @@
from pandas.util.decorators import Appender
-#-------------------------------------------------------------------------------
+#------------------------------------------------------------------------------
# Wrapper function for Series arithmetic methods
+
def _sparse_op_wrap(op, name):
"""
Wrapper function for Series arithmetic operations, to avoid
@@ -49,12 +50,13 @@ def wrapper(self, other):
sparse_index=self.sp_index,
fill_value=new_fill_value,
name=self.name)
- else: # pragma: no cover
+ else: # pragma: no cover
raise TypeError('operation with %s not supported' % type(other))
wrapper.__name__ = name
return wrapper
+
def _sparse_series_op(left, right, op, name):
left, right = left.align(right, join='outer', copy=False)
new_index = left.index
@@ -67,6 +69,7 @@ def _sparse_series_op(left, right, op, name):
return result
+
class SparseSeries(SparseArray, Series):
__array_priority__ = 15
@@ -98,7 +101,7 @@ def __new__(cls, data, index=None, sparse_index=None, kind='block',
data = Series(data)
values, sparse_index = make_sparse(data, kind=kind,
fill_value=fill_value)
- elif np.isscalar(data): # pragma: no cover
+ elif np.isscalar(data): # pragma: no cover
if index is None:
raise Exception('must pass index!')
@@ -200,7 +203,6 @@ def __setstate__(self, state):
nd_state, own_state = state
ndarray.__setstate__(self, nd_state)
-
index, fill_value, sp_index = own_state[:3]
name = None
if len(own_state) > 3:
@@ -540,5 +542,6 @@ def combine_first(self, other):
dense_combined = self.to_dense().combine_first(other)
return dense_combined.to_sparse(fill_value=self.fill_value)
+
class SparseTimeSeries(SparseSeries, TimeSeries):
pass
diff --git a/pandas/stats/common.py b/pandas/stats/common.py
index 492a7a7673397..c3034dbc390bf 100644
--- a/pandas/stats/common.py
+++ b/pandas/stats/common.py
@@ -13,14 +13,14 @@ def _get_cluster_type(cluster_type):
raise Exception('Unrecognized cluster type: %s' % cluster_type)
_CLUSTER_TYPES = {
- 0 : 'time',
- 1 : 'entity'
+ 0: 'time',
+ 1: 'entity'
}
_WINDOW_TYPES = {
- 0 : 'full_sample',
- 1 : 'rolling',
- 2 : 'expanding'
+ 0: 'full_sample',
+ 1: 'rolling',
+ 2: 'expanding'
}
@@ -37,6 +37,7 @@ def _get_window_type(window_type):
else: # pragma: no cover
raise Exception('Unrecognized window type: %s' % window_type)
+
def banner(text, width=80):
"""
diff --git a/pandas/stats/fama_macbeth.py b/pandas/stats/fama_macbeth.py
index 586642f813a91..2c8a3a65bd5ac 100644
--- a/pandas/stats/fama_macbeth.py
+++ b/pandas/stats/fama_macbeth.py
@@ -6,6 +6,7 @@
import pandas.stats.common as common
from pandas.util.decorators import cache_readonly
+
def fama_macbeth(**kwargs):
"""Runs Fama-MacBeth regression.
@@ -24,6 +25,7 @@ def fama_macbeth(**kwargs):
return klass(**kwargs)
+
class FamaMacBeth(object):
def __init__(self, y, x, intercept=True, nw_lags=None,
nw_lags_beta=None,
@@ -79,16 +81,16 @@ def t_stat(self):
@cache_readonly
def _results(self):
return {
- 'mean_beta' : self._mean_beta_raw,
- 'std_beta' : self._std_beta_raw,
- 't_stat' : self._t_stat_raw,
+ 'mean_beta': self._mean_beta_raw,
+ 'std_beta': self._std_beta_raw,
+ 't_stat': self._t_stat_raw,
}
@cache_readonly
def _coef_table(self):
buffer = StringIO()
buffer.write('%13s %13s %13s %13s %13s %13s\n' %
- ('Variable','Beta', 'Std Err','t-stat','CI 2.5%','CI 97.5%'))
+ ('Variable', 'Beta', 'Std Err', 't-stat', 'CI 2.5%', 'CI 97.5%'))
template = '%13s %13.4f %13.4f %13.2f %13.4f %13.4f\n'
for i, name in enumerate(self._cols):
@@ -128,13 +130,14 @@ def summary(self):
--------------------------------End of Summary---------------------------------
"""
params = {
- 'formulaRHS' : ' + '.join(self._cols),
- 'nu' : len(self._beta_raw),
- 'coefTable' : self._coef_table,
+ 'formulaRHS': ' + '.join(self._cols),
+ 'nu': len(self._beta_raw),
+ 'coefTable': self._coef_table,
}
return template % params
+
class MovingFamaMacBeth(FamaMacBeth):
def __init__(self, y, x, window_type='rolling', window=10,
intercept=True, nw_lags=None, nw_lags_beta=None,
@@ -197,11 +200,12 @@ def _result_index(self):
@cache_readonly
def _results(self):
return {
- 'mean_beta' : self._mean_beta_raw[-1],
- 'std_beta' : self._std_beta_raw[-1],
- 't_stat' : self._t_stat_raw[-1],
+ 'mean_beta': self._mean_beta_raw[-1],
+ 'std_beta': self._std_beta_raw[-1],
+ 't_stat': self._t_stat_raw[-1],
}
+
def _calc_t_stat(beta, nw_lags_beta):
N = len(beta)
B = beta - beta.mean(0)
diff --git a/pandas/stats/interface.py b/pandas/stats/interface.py
index 603d3b8289226..ff87aa1c9af26 100644
--- a/pandas/stats/interface.py
+++ b/pandas/stats/interface.py
@@ -3,6 +3,7 @@
from pandas.stats.plm import PanelOLS, MovingPanelOLS, NonPooledPanelOLS
import pandas.stats.common as common
+
def ols(**kwargs):
"""Returns the appropriate OLS object depending on whether you need
simple or panel OLS, and a full-sample or rolling/expanding OLS.
diff --git a/pandas/stats/math.py b/pandas/stats/math.py
index c048435493c13..1b926fa5ee7c0 100644
--- a/pandas/stats/math.py
+++ b/pandas/stats/math.py
@@ -6,6 +6,7 @@
import numpy as np
import numpy.linalg as linalg
+
def rank(X, cond=1.0e-12):
"""
Return the rank of a matrix X based on its generalized inverse,
@@ -20,6 +21,7 @@ def rank(X, cond=1.0e-12):
else:
return int(not np.alltrue(np.equal(X, 0.)))
+
def solve(a, b):
"""Returns the solution of A X = B."""
try:
@@ -27,6 +29,7 @@ def solve(a, b):
except linalg.LinAlgError:
return np.dot(linalg.pinv(a), b)
+
def inv(a):
"""Returns the inverse of A."""
try:
@@ -34,10 +37,12 @@ def inv(a):
except linalg.LinAlgError:
return np.linalg.pinv(a)
+
def is_psd(m):
eigvals = linalg.eigvals(m)
return np.isreal(eigvals).all() and (eigvals >= 0).all()
+
def newey_west(m, max_lags, nobs, df, nw_overlap=False):
"""
Compute Newey-West adjusted covariance matrix, taking into account
@@ -84,6 +89,7 @@ def newey_west(m, max_lags, nobs, df, nw_overlap=False):
return Xeps
+
def calc_F(R, r, beta, var_beta, nobs, df):
"""
Computes the standard F-test statistic for linear restriction
@@ -120,4 +126,3 @@ def calc_F(R, r, beta, var_beta, nobs, df):
p_value = 1 - f.cdf(F, q, nobs - df)
return F, (q, nobs - df), p_value
-
diff --git a/pandas/stats/misc.py b/pandas/stats/misc.py
index 7fc2892ad3c2f..e81319cb79c94 100644
--- a/pandas/stats/misc.py
+++ b/pandas/stats/misc.py
@@ -6,7 +6,7 @@
def zscore(series):
- return (series - series.mean()) / np.std(series, ddof = 0)
+ return (series - series.mean()) / np.std(series, ddof=0)
def correl_ts(frame1, frame2):
@@ -36,6 +36,7 @@ def correl_ts(frame1, frame2):
return Series(results)
+
def correl_xs(frame1, frame2):
return correl_ts(frame1.T, frame2.T)
@@ -124,6 +125,7 @@ def bucket(series, k, by=None):
return DataFrame(mat, index=series.index, columns=np.arange(k) + 1)
+
def _split_quantile(arr, k):
arr = np.asarray(arr)
mask = np.isfinite(arr)
@@ -132,6 +134,7 @@ def _split_quantile(arr, k):
return np.array_split(np.arange(n)[mask].take(order), k)
+
def bucketcat(series, cats):
"""
Produce DataFrame representing quantiles of a Series
@@ -162,6 +165,7 @@ def bucketcat(series, cats):
return DataFrame(data, columns=unique_labels)
+
def bucketpanel(series, bins=None, by=None, cat=None):
"""
Bucket data by two Series to create summary panel
@@ -198,7 +202,9 @@ def bucketpanel(series, bins=None, by=None, cat=None):
xcat, ycat = cat
return _bucketpanel_cat(series, xcat, ycat)
else:
- raise Exception('must specify either values or categories to bucket by')
+ raise Exception('must specify either values or categories '
+ 'to bucket by')
+
def _bucketpanel_by(series, xby, yby, xbins, ybins):
xby = xby.reindex(series.index)
@@ -229,6 +235,7 @@ def relabel(key):
return bucketed.rename(columns=relabel)
+
def _bucketpanel_cat(series, xcat, ycat):
xlabels, xmapping = _intern(xcat)
ylabels, ymapping = _intern(ycat)
@@ -256,6 +263,7 @@ def _bucketpanel_cat(series, xcat, ycat):
return result
+
def _intern(values):
# assumed no NaN values
values = np.asarray(values)
@@ -273,6 +281,7 @@ def _uniquify(xlabels, ylabels, xbins, ybins):
return _xpiece + _ypiece
+
def _bucket_labels(series, k):
arr = np.asarray(series)
mask = np.isfinite(arr)
diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py
index cfdc4aa8a23ab..b805a9dca128c 100644
--- a/pandas/stats/moments.py
+++ b/pandas/stats/moments.py
@@ -26,7 +26,7 @@
'expanding_skew', 'expanding_kurt', 'expanding_quantile',
'expanding_median', 'expanding_apply', 'expanding_corr_pairwise']
-#-------------------------------------------------------------------------------
+#------------------------------------------------------------------------------
# Docs
_doc_template = """
@@ -73,8 +73,8 @@
Either center of mass or span must be specified
EWMA is sometimes specified using a "span" parameter s, we have have that the
-decay parameter \alpha is related to the span as :math:`\alpha = 1 - 2 / (s + 1)
-= c / (1 + c)`
+decay parameter \alpha is related to the span as
+:math:`\alpha = 1 - 2 / (s + 1) = c / (1 + c)`
where c is the center of mass. Given a span, the associated center of mass is
:math:`c = (s - 1) / 2`
@@ -122,6 +122,8 @@
_bias_doc = r"""bias : boolean, default False
Use a standard estimation bias correction
"""
+
+
def rolling_count(arg, window, freq=None, time_rule=None):
"""
Rolling count of number of non-NaN observations inside provided window.
@@ -151,6 +153,7 @@ def rolling_count(arg, window, freq=None, time_rule=None):
return return_hook(result)
+
@Substitution("Unbiased moving covariance", _binary_arg_flex, _flex_retval)
@Appender(_doc_template)
def rolling_cov(arg1, arg2, window, min_periods=None, time_rule=None):
@@ -161,16 +164,18 @@ def _get_cov(X, Y):
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(arg1, arg2, _get_cov)
+
@Substitution("Moving sample correlation", _binary_arg_flex, _flex_retval)
@Appender(_doc_template)
def rolling_corr(arg1, arg2, window, min_periods=None, time_rule=None):
def _get_corr(a, b):
num = rolling_cov(a, b, window, min_periods, time_rule)
- den = (rolling_std(a, window, min_periods, time_rule) *
+ den = (rolling_std(a, window, min_periods, time_rule) *
rolling_std(b, window, min_periods, time_rule))
return num / den
return _flex_binary_moment(arg1, arg2, _get_corr)
+
def _flex_binary_moment(arg1, arg2, f):
if isinstance(arg1, np.ndarray) and isinstance(arg2, np.ndarray):
X, Y = _prep_binary(arg1, arg2)
@@ -197,6 +202,7 @@ def _flex_binary_moment(arg1, arg2, f):
else:
return _flex_binary_moment(arg2, arg1, f)
+
def rolling_corr_pairwise(df, window, min_periods=None):
"""
Computes pairwise rolling correlation matrices as Panel whose items are
@@ -226,6 +232,7 @@ def rolling_corr_pairwise(df, window, min_periods=None):
return Panel.from_dict(all_results).swapaxes('items', 'major')
+
def _rolling_moment(arg, window, func, minp, axis=0, freq=None,
time_rule=None, **kwargs):
"""
@@ -255,6 +262,7 @@ def _rolling_moment(arg, window, func, minp, axis=0, freq=None,
return return_hook(result)
+
def _process_data_structure(arg, kill_inf=True):
if isinstance(arg, DataFrame):
return_hook = lambda v: type(arg)(v, index=arg.index,
@@ -276,9 +284,10 @@ def _process_data_structure(arg, kill_inf=True):
return return_hook, values
-#-------------------------------------------------------------------------------
+#------------------------------------------------------------------------------
# Exponential moving moments
+
def _get_center_of_mass(com, span):
if span is not None:
if com is not None:
@@ -292,6 +301,7 @@ def _get_center_of_mass(com, span):
return float(com)
+
@Substitution("Exponentially-weighted moving average", _unary_arg, "")
@Appender(_ewm_doc)
def ewma(arg, com=None, span=None, min_periods=0, freq=None, time_rule=None,
@@ -309,10 +319,12 @@ def _ewma(v):
output = np.apply_along_axis(_ewma, 0, values)
return return_hook(output)
+
def _first_valid_index(arr):
# argmax scans from left
return notnull(arr).argmax() if len(arr) else 0
+
@Substitution("Exponentially-weighted moving variance", _unary_arg, _bias_doc)
@Appender(_ewm_doc)
def ewmvar(arg, com=None, span=None, min_periods=0, bias=False,
@@ -328,6 +340,7 @@ def ewmvar(arg, com=None, span=None, min_periods=0, bias=False,
return result
+
@Substitution("Exponentially-weighted moving std", _unary_arg, _bias_doc)
@Appender(_ewm_doc)
def ewmstd(arg, com=None, span=None, min_periods=0, bias=False,
@@ -338,6 +351,7 @@ def ewmstd(arg, com=None, span=None, min_periods=0, bias=False,
ewmvol = ewmstd
+
@Substitution("Exponentially-weighted moving covariance", _binary_arg, "")
@Appender(_ewm_doc)
def ewmcov(arg1, arg2, com=None, span=None, min_periods=0, bias=False,
@@ -349,13 +363,14 @@ def ewmcov(arg1, arg2, com=None, span=None, min_periods=0, bias=False,
mean = lambda x: ewma(x, com=com, span=span, min_periods=min_periods)
- result = (mean(X*Y) - mean(X) * mean(Y))
+ result = (mean(X * Y) - mean(X) * mean(Y))
com = _get_center_of_mass(com, span)
if not bias:
result *= (1.0 + 2.0 * com) / (2.0 * com)
return result
+
@Substitution("Exponentially-weighted moving " "correlation", _binary_arg, "")
@Appender(_ewm_doc)
def ewmcorr(arg1, arg2, com=None, span=None, min_periods=0,
@@ -368,7 +383,7 @@ def ewmcorr(arg1, arg2, com=None, span=None, min_periods=0,
mean = lambda x: ewma(x, com=com, span=span, min_periods=min_periods)
var = lambda x: ewmvar(x, com=com, span=span, min_periods=min_periods,
bias=True)
- return (mean(X*Y) - mean(X)*mean(Y)) / _zsqrt(var(X) * var(Y))
+ return (mean(X * Y) - mean(X) * mean(Y)) / _zsqrt(var(X) * var(Y))
def _zsqrt(x):
@@ -384,6 +399,7 @@ def _zsqrt(x):
return result
+
def _prep_binary(arg1, arg2):
if not isinstance(arg2, type(arg1)):
raise Exception('Input arrays must be of the same type!')
@@ -397,6 +413,7 @@ def _prep_binary(arg1, arg2):
#----------------------------------------------------------------------
# Python interface to Cython functions
+
def _conv_timerule(arg, freq, time_rule):
if time_rule is not None:
import warnings
@@ -412,6 +429,7 @@ def _conv_timerule(arg, freq, time_rule):
return arg
+
def _require_min_periods(p):
def _check_func(minp, window):
if minp is None:
@@ -420,12 +438,14 @@ def _check_func(minp, window):
return max(p, minp)
return _check_func
+
def _use_window(minp, window):
if minp is None:
return window
else:
return minp
+
def _rolling_func(func, desc, check_minp=_use_window):
@Substitution(desc, _unary_arg, _type_of_input)
@Appender(_doc_template)
@@ -455,6 +475,7 @@ def call_cython(arg, window, minp, **kwds):
rolling_kurt = _rolling_func(lib.roll_kurt, 'Unbiased moving kurtosis',
check_minp=_require_min_periods(4))
+
def rolling_quantile(arg, window, quantile, min_periods=None, freq=None,
time_rule=None):
"""Moving quantile
@@ -480,6 +501,7 @@ def call_cython(arg, window, minp):
return _rolling_moment(arg, window, call_cython, min_periods,
freq=freq, time_rule=time_rule)
+
def rolling_apply(arg, window, func, min_periods=None, freq=None,
time_rule=None):
"""Generic moving function application
diff --git a/pandas/stats/ols.py b/pandas/stats/ols.py
index 0192dced6371a..d19898990022d 100644
--- a/pandas/stats/ols.py
+++ b/pandas/stats/ols.py
@@ -21,6 +21,7 @@
_FP_ERR = 1e-8
+
class OLS(object):
"""
Runs a full sample ordinary least squares regression.
@@ -221,7 +222,7 @@ def f_test(self, hypothesis):
eqs = hypothesis.split(',')
elif isinstance(hypothesis, list):
eqs = hypothesis
- else: # pragma: no cover
+ else: # pragma: no cover
raise Exception('hypothesis must be either string or list')
for equation in eqs:
row = np.zeros(len(x_names))
@@ -438,7 +439,7 @@ def predict(self, beta=None, x=None, fill_value=None,
else:
x = x.fillna(value=fill_value, method=fill_method, axis=axis)
if isinstance(x, Series):
- x = DataFrame({'x' : x})
+ x = DataFrame({'x': x})
if self._intercept:
x['intercept'] = 1.
@@ -500,10 +501,10 @@ def summary_as_matrix(self):
"""Returns the formatted results of the OLS as a DataFrame."""
results = self._results
beta = results['beta']
- data = {'beta' : results['beta'],
- 't-stat' : results['t_stat'],
- 'p-value' : results['p_value'],
- 'std err' : results['std_err']}
+ data = {'beta': results['beta'],
+ 't-stat': results['t_stat'],
+ 'p-value': results['p_value'],
+ 'std err': results['std_err']}
return DataFrame(data, beta.index).T
@cache_readonly
@@ -538,7 +539,7 @@ def summary(self):
f_stat = results['f_stat']
- bracketed = ['<%s>' %str(c) for c in results['beta'].index]
+ bracketed = ['<%s>' % str(c) for c in results['beta'].index]
formula = StringIO()
formula.write(bracketed[0])
@@ -554,21 +555,21 @@ def summary(self):
formula.write(' + ' + coef)
params = {
- 'bannerTop' : scom.banner('Summary of Regression Analysis'),
- 'bannerCoef' : scom.banner('Summary of Estimated Coefficients'),
- 'bannerEnd' : scom.banner('End of Summary'),
- 'formula' : formula.getvalue(),
- 'r2' : results['r2'],
- 'r2_adj' : results['r2_adj'],
- 'nobs' : results['nobs'],
- 'df' : results['df'],
- 'df_model' : results['df_model'],
- 'df_resid' : results['df_resid'],
- 'coef_table' : coef_table,
- 'rmse' : results['rmse'],
- 'f_stat' : f_stat['f-stat'],
- 'f_stat_shape' : '(%d, %d)' % (f_stat['DF X'], f_stat['DF Resid']),
- 'f_stat_p_value' : f_stat['p-value'],
+ 'bannerTop': scom.banner('Summary of Regression Analysis'),
+ 'bannerCoef': scom.banner('Summary of Estimated Coefficients'),
+ 'bannerEnd': scom.banner('End of Summary'),
+ 'formula': formula.getvalue(),
+ 'r2': results['r2'],
+ 'r2_adj': results['r2_adj'],
+ 'nobs': results['nobs'],
+ 'df': results['df'],
+ 'df_model': results['df_model'],
+ 'df_resid': results['df_resid'],
+ 'coef_table': coef_table,
+ 'rmse': results['rmse'],
+ 'f_stat': f_stat['f-stat'],
+ 'f_stat_shape': '(%d, %d)' % (f_stat['DF X'], f_stat['DF Resid']),
+ 'f_stat_p_value': f_stat['p-value'],
}
return template % params
@@ -576,7 +577,6 @@ def summary(self):
def __repr__(self):
return self.summary
-
@cache_readonly
def _time_obs_count(self):
# XXX
@@ -630,7 +630,7 @@ def _set_window(self, window_type, window, min_periods):
self._window = int(window)
self._min_periods = min_periods
-#-------------------------------------------------------------------------------
+#------------------------------------------------------------------------------
# "Public" results
@cache_readonly
@@ -745,7 +745,7 @@ def y_predict(self):
return Series(self._y_predict_raw[self._valid_obs_labels],
index=self._result_index)
-#-------------------------------------------------------------------------------
+#------------------------------------------------------------------------------
# "raw" attributes, calculations
@property
@@ -833,9 +833,10 @@ def _cum_xx(self, x):
slicer = lambda df, dt: df.truncate(dt, dt).values
if not self._panel_model:
_get_index = x.index.get_loc
+
def slicer(df, dt):
i = _get_index(dt)
- return df.values[i:i+1, :]
+ return df.values[i:i + 1, :]
last = np.zeros((K, K))
@@ -858,9 +859,10 @@ def _cum_xy(self, x, y):
x_slicer = lambda df, dt: df.truncate(dt, dt).values
if not self._panel_model:
_get_index = x.index.get_loc
+
def x_slicer(df, dt):
i = _get_index(dt)
- return df.values[i:i+1]
+ return df.values[i:i + 1]
_y_get_index = y.index.get_loc
_values = y.values
@@ -871,7 +873,7 @@ def y_slicer(df, dt):
else:
def y_slicer(df, dt):
i = _y_get_index(dt)
- return _values[i:i+1]
+ return _values[i:i + 1]
last = np.zeros(len(x.columns))
for i, date in enumerate(dates):
@@ -996,7 +998,7 @@ def _resid_stats(self):
after=date))
weights_slice = weights.truncate(prior_date, date)
demeaned = Y_slice - np.average(Y_slice, weights=weights_slice)
- SS_total = (weights_slice*demeaned**2).sum()
+ SS_total = (weights_slice * demeaned ** 2).sum()
else:
SS_total = ((Y_slice - Y_slice.mean()) ** 2).sum()
@@ -1008,9 +1010,9 @@ def _resid_stats(self):
uncentered_sst.append(SST_uncentered)
return {
- 'sse' : np.array(sse),
- 'centered_tss' : np.array(sst),
- 'uncentered_tss' : np.array(uncentered_sst),
+ 'sse': np.array(sse),
+ 'centered_tss': np.array(sst),
+ 'uncentered_tss': np.array(uncentered_sst),
}
@cache_readonly
@@ -1166,7 +1168,7 @@ def _results(self):
value = value[self.beta.index[-1]]
elif isinstance(value, DataFrame):
value = value.xs(self.beta.index[-1])
- else: # pragma: no cover
+ else: # pragma: no cover
raise Exception('Problem retrieving %s' % result)
results[result] = value
@@ -1226,6 +1228,7 @@ def _enough_obs(self):
return self._nobs_raw >= max(self._min_periods,
len(self._x.columns) + 1)
+
def _safe_update(d, other):
"""
Combine dictionaries with non-overlapping keys
@@ -1236,6 +1239,7 @@ def _safe_update(d, other):
d[k] = v
+
def _filter_data(lhs, rhs, weights=None):
"""
Cleans the input for single OLS.
@@ -1257,7 +1261,7 @@ def _filter_data(lhs, rhs, weights=None):
lhs = Series(lhs, index=rhs.index)
rhs = _combine_rhs(rhs)
- lhs = DataFrame({'__y__' : lhs}, dtype=float)
+ lhs = DataFrame({'__y__': lhs}, dtype=float)
pre_filt_rhs = rhs.dropna(how='any')
combined = rhs.join(lhs, how='outer')
@@ -1294,12 +1298,12 @@ def _combine_rhs(rhs):
elif isinstance(rhs, dict):
for name, value in rhs.iteritems():
if isinstance(value, Series):
- _safe_update(series, {name : value})
+ _safe_update(series, {name: value})
elif isinstance(value, (dict, DataFrame)):
_safe_update(series, value)
- else: # pragma: no cover
+ else: # pragma: no cover
raise Exception('Invalid RHS data type: %s' % type(value))
- else: # pragma: no cover
+ else: # pragma: no cover
raise Exception('Invalid RHS type: %s' % type(rhs))
if not isinstance(series, DataFrame):
@@ -1311,7 +1315,7 @@ def _combine_rhs(rhs):
# MovingOLS and MovingPanelOLS
def _y_converter(y):
y = y.values.squeeze()
- if y.ndim == 0: # pragma: no cover
+ if y.ndim == 0: # pragma: no cover
return np.array([y])
else:
return y
@@ -1327,4 +1331,3 @@ def f_stat_to_dict(result):
result['p-value'] = p_value
return result
-
diff --git a/pandas/stats/plm.py b/pandas/stats/plm.py
index 7b6f85b12b5db..7dde37822c02b 100644
--- a/pandas/stats/plm.py
+++ b/pandas/stats/plm.py
@@ -20,6 +20,7 @@
import pandas.stats.math as math
from pandas.util.decorators import cache_readonly
+
class PanelOLS(OLS):
"""Implements panel OLS.
@@ -54,7 +55,7 @@ def __init__(self, y, x, weights=None, intercept=True, nw_lags=None,
self._T = len(self._index)
def log(self, msg):
- if self._verbose: # pragma: no cover
+ if self._verbose: # pragma: no cover
print msg
def _prepare_data(self):
@@ -268,7 +269,7 @@ def _add_categorical_dummies(self, panel, cat_mappings):
else:
to_exclude = mapped_name = dummies.columns[0]
- if mapped_name not in dummies.columns: # pragma: no cover
+ if mapped_name not in dummies.columns: # pragma: no cover
raise Exception('%s not in %s' % (to_exclude,
dummies.columns))
@@ -337,7 +338,7 @@ def _r2_raw(self):
if self._use_centered_tss:
SST = ((Y - np.mean(Y)) ** 2).sum()
else:
- SST = (Y**2).sum()
+ SST = (Y ** 2).sum()
return 1 - SSE / SST
@@ -427,6 +428,7 @@ def _time_has_obs(self):
def _nobs(self):
return len(self._y)
+
def _convertDummies(dummies, mapping):
# cleans up the names of the generated dummies
new_items = []
@@ -446,6 +448,7 @@ def _convertDummies(dummies, mapping):
return dummies
+
def _is_numeric(df):
for col in df:
if df[col].dtype.name == 'object':
@@ -453,6 +456,7 @@ def _is_numeric(df):
return True
+
def add_intercept(panel, name='intercept'):
"""
Add column of ones to input panel
@@ -471,6 +475,7 @@ def add_intercept(panel, name='intercept'):
return panel.consolidate()
+
class MovingPanelOLS(MovingOLS, PanelOLS):
"""Implements rolling/expanding panel OLS.
@@ -648,13 +653,14 @@ def _enough_obs(self):
# TODO: write unit tests for this
rank_threshold = len(self._x.columns) + 1
- if self._min_obs < rank_threshold: # pragma: no cover
+ if self._min_obs < rank_threshold: # pragma: no cover
warnings.warn('min_obs is smaller than rank of X matrix')
enough_observations = self._nobs_raw >= self._min_obs
enough_time_periods = self._window_time_obs >= self._min_periods
return enough_time_periods & enough_observations
+
def create_ols_dict(attr):
def attr_getter(self):
d = {}
@@ -666,9 +672,11 @@ def attr_getter(self):
return attr_getter
+
def create_ols_attr(attr):
return property(create_ols_dict(attr))
+
class NonPooledPanelOLS(object):
"""Implements non-pooled panel OLS.
@@ -774,6 +782,7 @@ def _var_beta_panel(y, x, beta, xx, rmse, cluster_axis,
return np.dot(xx_inv, np.dot(xox, xx_inv))
+
def _xx_time_effects(x, y):
"""
Returns X'X - (X'T) (T'T)^-1 (T'X)
@@ -790,5 +799,3 @@ def _xx_time_effects(x, y):
count = count[selector]
return xx - np.dot(xt.T / count, xt)
-
-
diff --git a/pandas/stats/var.py b/pandas/stats/var.py
index e2b5a2ce7c466..a4eb8920a3b40 100644
--- a/pandas/stats/var.py
+++ b/pandas/stats/var.py
@@ -10,6 +10,7 @@
from pandas.stats.math import inv
from pandas.stats.ols import _combine_rhs
+
class VAR(object):
"""
Estimates VAR(p) regression on multivariate time series data
@@ -164,8 +165,8 @@ def granger_causality(self):
p_value_mat = DataFrame(p_value_dict)
return {
- 'f-stat' : f_stat_mat,
- 'p-value' : p_value_mat,
+ 'f-stat': f_stat_mat,
+ 'p-value': p_value_mat,
}
@cache_readonly
@@ -226,13 +227,13 @@ def summary(self):
%(banner_end)s
"""
params = {
- 'banner_top' : common.banner('Summary of VAR'),
- 'banner_coef' : common.banner('Summary of Estimated Coefficients'),
- 'banner_end' : common.banner('End of Summary'),
- 'coef_table' : self.beta,
- 'aic' : self.aic,
- 'bic' : self.bic,
- 'nobs' : self._nobs,
+ 'banner_top': common.banner('Summary of VAR'),
+ 'banner_coef': common.banner('Summary of Estimated Coefficients'),
+ 'banner_end': common.banner('End of Summary'),
+ 'coef_table': self.beta,
+ 'aic': self.aic,
+ 'bic': self.bic,
+ 'nobs': self._nobs,
}
return template % params
@@ -410,8 +411,8 @@ def _ic(self):
k = self._p * (self._k * self._p + 1)
n = self._nobs * self._k
- return {'aic' : 2 * k + n * np.log(RSS / n),
- 'bic' : n * np.log(RSS / n) + k * np.log(n)}
+ return {'aic': 2 * k + n * np.log(RSS / n),
+ 'bic': n * np.log(RSS / n) + k * np.log(n)}
@cache_readonly
def _k(self):
@@ -478,6 +479,7 @@ def _sigma(self):
def __repr__(self):
return self.summary
+
def lag_select(data, max_lags=5, ic=None):
"""
Select number of lags based on a variety of information criteria
@@ -496,6 +498,7 @@ def lag_select(data, max_lags=5, ic=None):
"""
pass
+
class PanelVAR(VAR):
"""
Performs Vector Autoregression on panel data.
@@ -567,14 +570,17 @@ def _prep_panel_data(data):
return Panel.fromDict(data)
+
def _drop_incomplete_rows(array):
mask = np.isfinite(array).all(1)
indices = np.arange(len(array))[mask]
return array.take(indices, 0)
+
def _make_param_name(lag, name):
return 'L%d.%s' % (lag, name)
+
def chain_dot(*matrices):
"""
Returns the dot product of the given matrices.
diff --git a/pandas/tools/describe.py b/pandas/tools/describe.py
index 43e3051da468c..eca5a800b3c6c 100644
--- a/pandas/tools/describe.py
+++ b/pandas/tools/describe.py
@@ -1,5 +1,6 @@
from pandas.core.series import Series
+
def value_range(df):
"""
Return the minimum and maximum of a dataframe in a series object
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 4a50016c39927..d92ed1cb01c42 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -21,6 +21,7 @@
import pandas.lib as lib
+
@Substitution('\nleft : DataFrame')
@Appender(_merge_doc, indents=0)
def merge(left, right, how='inner', on=None, left_on=None, right_on=None,
@@ -145,6 +146,7 @@ def _merger(x, y):
# TODO: transformations??
# TODO: only copy DataFrames when modification necessary
+
class _MergeOperation(object):
"""
Perform a database (SQL) merge operation between two DataFrame objects
@@ -182,7 +184,8 @@ def get_result(self):
# this is a bit kludgy
ldata, rdata = self._get_merge_data()
- # TODO: more efficiently handle group keys to avoid extra consolidation!
+ # TODO: more efficiently handle group keys to avoid extra
+ # consolidation!
join_op = _BlockJoinOperation([ldata, rdata], join_index,
[left_indexer, right_indexer], axis=1,
copy=self.copy)
@@ -427,7 +430,7 @@ def _get_join_indexers(left_keys, right_keys, sort=False, how='inner'):
for x in group_sizes:
max_groups *= long(x)
- if max_groups > 2**63: # pragma: no cover
+ if max_groups > 2 ** 63: # pragma: no cover
raise MergeError('Combinatorial explosion! (boom)')
left_group_key, right_group_key, max_groups = \
@@ -437,7 +440,6 @@ def _get_join_indexers(left_keys, right_keys, sort=False, how='inner'):
return join_func(left_group_key, right_group_key, max_groups)
-
class _OrderedMerge(_MergeOperation):
def __init__(self, left, right, on=None, by=None, left_on=None,
@@ -452,10 +454,9 @@ def __init__(self, left, right, on=None, by=None, left_on=None,
left_index=left_index,
right_index=right_index,
how='outer', suffixes=suffixes,
- sort=True # sorts when factorizing
+ sort=True # sorts when factorizing
)
-
def get_result(self):
join_index, left_indexer, right_indexer = self._get_join_info()
@@ -503,6 +504,7 @@ def _get_multiindex_indexer(join_keys, index, sort=False):
return left_indexer, right_indexer
+
def _get_single_indexer(join_key, index, sort=False):
left_key, right_key, count = _factorize_keys(join_key, index, sort=sort)
@@ -513,6 +515,7 @@ def _get_single_indexer(join_key, index, sort=False):
return left_indexer, right_indexer
+
def _left_join_on_index(left_ax, right_ax, join_keys, sort=False):
join_index = left_ax
left_indexer = None
@@ -544,10 +547,10 @@ def _right_outer_join(x, y, max_groups):
return left_indexer, right_indexer
_join_functions = {
- 'inner' : lib.inner_join,
- 'left' : lib.left_outer_join,
- 'right' : _right_outer_join,
- 'outer' : lib.full_outer_join,
+ 'inner': lib.inner_join,
+ 'left': lib.left_outer_join,
+ 'right': _right_outer_join,
+ 'outer': lib.full_outer_join,
}
@@ -584,6 +587,7 @@ def _factorize_keys(lk, rk, sort=True):
return llab, rlab, count
+
def _sort_labels(uniques, left, right):
if not isinstance(uniques, np.ndarray):
# tuplesafe
@@ -602,6 +606,7 @@ def _sort_labels(uniques, left, right):
return new_left, new_right
+
class _BlockJoinOperation(object):
"""
BlockJoinOperation made generic for N DataFrames
@@ -713,7 +718,6 @@ def _merge_blocks(self, merge_chunks):
return make_block(out, new_block_items, self.result_items)
-
class _JoinUnit(object):
"""
Blocks plus indexer
@@ -762,6 +766,7 @@ def reindex_block(self, block, axis, ref_items, copy=True):
result.ref_items = ref_items
return result
+
def _may_need_upcasting(blocks):
for block in blocks:
if isinstance(block, (IntBlock, BoolBlock)):
@@ -788,18 +793,21 @@ def _upcast_blocks(blocks):
# use any ref_items
return _consolidate(new_blocks, newb.ref_items)
+
def _get_all_block_kinds(blockmaps):
kinds = set()
for mapping in blockmaps:
kinds |= set(mapping)
return kinds
+
def _get_merge_block_kinds(blockmaps):
kinds = set()
for _, mapping in blockmaps:
kinds |= set(mapping)
return kinds
+
def _get_block_dtype(blocks):
if len(blocks) == 0:
return object
@@ -816,6 +824,7 @@ def _get_block_dtype(blocks):
#----------------------------------------------------------------------
# Concatenate DataFrame objects
+
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False):
"""
@@ -884,8 +893,8 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
elif join == 'inner':
self.intersect = True
else: # pragma: no cover
- raise ValueError('Only can inner (intersect) or outer (union) join '
- 'the other axis')
+ raise ValueError('Only can inner (intersect) or outer (union) '
+ 'join the other axis')
if isinstance(objs, dict):
if keys is None:
@@ -1149,6 +1158,7 @@ def _maybe_check_integrity(self, concat_index):
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
+
def _make_concat_multiindex(indexes, keys, levels=None, names=None):
if ((levels is None and isinstance(keys[0], tuple)) or
(levels is not None and len(levels) > 1)):
@@ -1250,6 +1260,5 @@ def _should_fill(lname, rname):
return lname == rname
-
def _any(x):
return x is not None and len(x) > 0 and any([y is not None for y in x])
diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py
index 146cba82788e9..bed1fe2212746 100644
--- a/pandas/tools/pivot.py
+++ b/pandas/tools/pivot.py
@@ -126,6 +126,7 @@ def pivot_table(data, values=None, rows=None, cols=None, aggfunc='mean',
DataFrame.pivot_table = pivot_table
+
def _add_margins(table, data, values, rows=None, cols=None, aggfunc=np.mean):
grand_margin = {}
for k, v in data[values].iteritems():
@@ -142,7 +143,6 @@ def _add_margins(table, data, values, rows=None, cols=None, aggfunc=np.mean):
table_pieces = []
margin_keys = []
-
def _all_key(key):
return (key, 'All') + ('',) * (len(cols) - 1)
@@ -199,6 +199,7 @@ def _all_key(key):
return result
+
def _convert_by(by):
if by is None:
by = []
@@ -209,6 +210,7 @@ def _convert_by(by):
by = list(by)
return by
+
def crosstab(rows, cols, values=None, rownames=None, colnames=None,
aggfunc=None, margins=False):
"""
@@ -284,6 +286,7 @@ def crosstab(rows, cols, values=None, rownames=None, colnames=None,
aggfunc=aggfunc, margins=margins)
return table
+
def _get_names(arrs, names, prefix='row'):
if names is None:
names = []
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 34754a23ba5b4..a2cc21e23c47b 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -15,14 +15,15 @@
from pandas.tseries.frequencies import get_period_alias, get_base_alias
from pandas.tseries.offsets import DateOffset
-try: # mpl optional
+try: # mpl optional
import pandas.tseries.converter as conv
conv.register()
except ImportError:
pass
+
def _get_standard_kind(kind):
- return {'density' : 'kde'}.get(kind, kind)
+ return {'density': 'kde'}.get(kind, kind)
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
@@ -120,8 +121,8 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
# ax.grid(b=grid)
axes[0, 0].yaxis.set_visible(False)
- axes[n-1, n-1].xaxis.set_visible(False)
- axes[n-1, n-1].yaxis.set_visible(False)
+ axes[n - 1, n - 1].xaxis.set_visible(False)
+ axes[n - 1, n - 1].yaxis.set_visible(False)
axes[0, n - 1].yaxis.tick_right()
for ax in axes.flat:
@@ -135,10 +136,12 @@ def _gca():
import matplotlib.pyplot as plt
return plt.gca()
+
def _gcf():
import matplotlib.pyplot as plt
return plt.gcf()
+
def _get_marker_compat(marker):
import matplotlib.lines as mlines
import matplotlib as mpl
@@ -148,6 +151,7 @@ def _get_marker_compat(marker):
return 'o'
return marker
+
def radviz(frame, class_column, ax=None, **kwds):
"""RadViz - a multivariate data visualization algorithm
@@ -232,6 +236,7 @@ def normalize(series):
ax.axis('equal')
return ax
+
def andrews_curves(data, class_column, ax=None, samples=200):
"""
Parameters:
@@ -243,6 +248,7 @@ def andrews_curves(data, class_column, ax=None, samples=200):
from math import sqrt, pi, sin, cos
import matplotlib.pyplot as plt
import random
+
def function(amplitudes):
def f(x):
x1 = amplitudes[0]
@@ -256,6 +262,7 @@ def f(x):
result += amplitudes[-1] * sin(harmonic * x)
return result
return f
+
def random_color(column):
random.seed(column)
return [random.random() for _ in range(3)]
@@ -280,6 +287,7 @@ def random_color(column):
ax.grid()
return ax
+
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""Bootstrap plot.
@@ -289,7 +297,8 @@ def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
fig: matplotlib figure object, optional
size: number of data points to consider during each sampling
samples: number of times the bootstrap procedure is performed
- kwds: optional keyword arguments for plotting commands, must be accepted by both hist and plot
+ kwds: optional keyword arguments for plotting commands, must be accepted
+ by both hist and plot
Returns:
--------
@@ -336,6 +345,7 @@ def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
plt.setp(axis.get_yticklabels(), fontsize=8)
return fig
+
def parallel_coordinates(data, class_column, cols=None, ax=None, **kwds):
"""Parallel coordinates plotting.
@@ -353,6 +363,7 @@ def parallel_coordinates(data, class_column, cols=None, ax=None, **kwds):
"""
import matplotlib.pyplot as plt
import random
+
def random_color(column):
random.seed(column)
return [random.random() for _ in range(3)]
@@ -392,6 +403,7 @@ def random_color(column):
ax.grid()
return ax
+
def lag_plot(series, ax=None, **kwds):
"""Lag plot for time series.
@@ -416,6 +428,7 @@ def lag_plot(series, ax=None, **kwds):
ax.scatter(y1, y2, **kwds)
return ax
+
def autocorrelation_plot(series, ax=None):
"""Autocorrelation plot for time series.
@@ -435,23 +448,25 @@ def autocorrelation_plot(series, ax=None):
ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
+
def r(h):
return ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
x = np.arange(n) + 1
y = map(r, x)
z95 = 1.959963984540054
z99 = 2.5758293035489004
- ax.axhline(y=z99/np.sqrt(n), linestyle='--', color='grey')
- ax.axhline(y=z95/np.sqrt(n), color='grey')
+ ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
+ ax.axhline(y=z95 / np.sqrt(n), color='grey')
ax.axhline(y=0.0, color='black')
- ax.axhline(y=-z95/np.sqrt(n), color='grey')
- ax.axhline(y=-z99/np.sqrt(n), linestyle='--', color='grey')
+ ax.axhline(y=-z95 / np.sqrt(n), color='grey')
+ ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
ax.plot(x, y)
ax.grid()
return ax
+
def grouped_hist(data, column=None, by=None, ax=None, bins=50, log=False,
figsize=None, layout=None, sharex=False, sharey=False,
rot=90):
@@ -590,7 +605,7 @@ def _maybe_right_yaxis(self, ax):
orig_ax, new_ax = ax, ax.twinx()
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
- if len(orig_ax.get_lines()) == 0: # no data on left y
+ if len(orig_ax.get_lines()) == 0: # no data on left y
orig_ax.get_yaxis().set_visible(False)
if len(new_ax.get_lines()) == 0:
@@ -795,6 +810,7 @@ def _get_style(self, i, col_name):
return style or None
+
class KdePlot(MPLPlot):
def __init__(self, data, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
@@ -830,6 +846,7 @@ def _post_plot_logic(self):
for ax in self.axes:
ax.legend(loc='best')
+
class LinePlot(MPLPlot):
def __init__(self, data, **kwargs):
@@ -861,9 +878,9 @@ def _use_dynamic_x(self):
ax = self._get_ax(0)
ax_freq = getattr(ax, 'freq', None)
- if freq is None: # convert irregular if axes has freq info
+ if freq is None: # convert irregular if axes has freq info
freq = ax_freq
- else: # do not use tsplot if irregular was plotted first
+ else: # do not use tsplot if irregular was plotted first
if (ax_freq is None) and (len(ax.get_lines()) > 0):
return False
@@ -887,6 +904,7 @@ def _make_plot(self):
x = self._get_xticks(convert_period=True)
has_colors, colors = self._get_colors()
+
def _maybe_add_color(kwargs, style, i):
if (not has_colors and
(style is None or re.match('[a-z]+', style) is None)
@@ -945,14 +963,14 @@ def to_leg_label(label, i):
return label
if isinstance(data, Series):
- ax = self._get_ax(0) #self.axes[0]
+ ax = self._get_ax(0) # self.axes[0]
style = self.style or ''
label = com.pprint_thing(self.label)
kwds = kwargs.copy()
_maybe_add_color(kwds, style, 0)
- newlines = tsplot(data, plotf, ax=ax, label=label, style=self.style,
- **kwds)
+ newlines = tsplot(data, plotf, ax=ax, label=label,
+ style=self.style, **kwds)
ax.grid(self.grid)
lines.append(newlines[0])
leg_label = to_leg_label(label, 0)
@@ -1059,7 +1077,7 @@ def _post_plot_logic(self):
class BarPlot(MPLPlot):
- _default_rot = {'bar' : 90, 'barh' : 0}
+ _default_rot = {'bar': 90, 'barh': 0}
def __init__(self, data, **kwargs):
self.stacked = kwargs.pop('stacked', False)
@@ -1088,7 +1106,7 @@ def _make_plot(self):
rects = []
labels = []
- ax = self._get_ax(0) #self.axes[0]
+ ax = self._get_ax(0) # self.axes[0]
bar_f = self.bar_f
@@ -1102,7 +1120,7 @@ def _make_plot(self):
kwds['color'] = colors[i % len(colors)]
if self.subplots:
- ax = self._get_ax(i) #self.axes[i]
+ ax = self._get_ax(i) # self.axes[i]
rect = bar_f(ax, self.ax_pos, y, 0.5, start=pos_prior, **kwds)
ax.set_title(label)
elif self.stacked:
@@ -1149,6 +1167,7 @@ def _post_plot_logic(self):
#if self.subplots and self.legend:
# self.axes[0].legend(loc='best')
+
class BoxPlot(MPLPlot):
pass
@@ -1159,9 +1178,10 @@ class HistPlot(MPLPlot):
def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True,
sharey=False, use_index=True, figsize=None, grid=False,
- legend=True, rot=None, ax=None, style=None, title=None, xlim=None,
- ylim=None, logy=False, xticks=None, yticks=None, kind='line',
- sort_columns=False, fontsize=None, secondary_y=False, **kwds):
+ legend=True, rot=None, ax=None, style=None, title=None,
+ xlim=None, ylim=None, logy=False, xticks=None, yticks=None,
+ kind='line', sort_columns=False, fontsize=None,
+ secondary_y=False, **kwds):
"""
Make line or bar plot of DataFrame's series with the index on the x-axis
@@ -1255,6 +1275,7 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True,
else:
return plot_obj.axes[0]
+
def plot_series(series, label=None, kind='line', use_index=True, rot=None,
xticks=None, yticks=None, xlim=None, ylim=None,
ax=None, style=None, grid=None, logy=False, secondary_y=False,
@@ -1330,6 +1351,7 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None,
return plot_obj.ax
+
def boxplot(data, column=None, by=None, ax=None, fontsize=None,
rot=0, grid=True, figsize=None, **kwds):
"""
@@ -1354,7 +1376,7 @@ def boxplot(data, column=None, by=None, ax=None, fontsize=None,
"""
from pandas import Series, DataFrame
if isinstance(data, Series):
- data = DataFrame({'x' : data})
+ data = DataFrame({'x': data})
column = 'x'
def plot_group(grouped, ax):
@@ -1411,6 +1433,7 @@ def plot_group(grouped, ax):
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
return ret
+
def format_date_labels(ax, rot):
# mini version of autofmt_xdate
try:
@@ -1419,7 +1442,7 @@ def format_date_labels(ax, rot):
label.set_rotation(rot)
fig = ax.get_figure()
fig.subplots_adjust(bottom=0.2)
- except Exception: # pragma: no cover
+ except Exception: # pragma: no cover
pass
@@ -1515,6 +1538,7 @@ def hist_frame(data, grid=True, xlabelsize=None, xrot=None,
return axes
+
def hist_series(self, ax=None, grid=True, xlabelsize=None, xrot=None,
ylabelsize=None, yrot=None, **kwds):
"""
@@ -1563,6 +1587,7 @@ def hist_series(self, ax=None, grid=True, xlabelsize=None, xrot=None,
return ax
+
def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,
rot=0, grid=True, figsize=None, **kwds):
"""
@@ -1628,6 +1653,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,
grid=grid, figsize=figsize, **kwds)
return ret
+
def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True,
figsize=None, sharex=True, sharey=True, layout=None,
rot=0, ax=None):
@@ -1672,6 +1698,7 @@ def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True,
return fig, axes
+
def _grouped_plot_by_column(plotf, data, columns=None, by=None,
numeric_only=True, grid=False,
figsize=None, ax=None):
@@ -1710,6 +1737,7 @@ def _grouped_plot_by_column(plotf, data, columns=None, by=None,
return fig, axes
+
def _get_layout(nplots):
if nplots == 1:
return (1, 1)
@@ -1729,6 +1757,7 @@ def _get_layout(nplots):
# copied from matplotlib/pyplot.py for compatibility with matplotlib < 1.0
+
def _subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,
subplot_kw=None, ax=None, secondary_y=False, data=None,
**fig_kw):
@@ -1817,7 +1846,7 @@ def _subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,
# Create empty object array to hold all axes. It's easiest to make it 1-d
# so we can just append subplots upon creation, and then
- nplots = nrows*ncols
+ nplots = nrows * ncols
axarr = np.empty(nplots, dtype=object)
def on_right(i):
@@ -1854,18 +1883,18 @@ def on_right(i):
if nplots > 1:
if sharex and nrows > 1:
for i, ax in enumerate(axarr):
- if np.ceil(float(i + 1) / ncols) < nrows: # only last row
+ if np.ceil(float(i + 1) / ncols) < nrows: # only last row
[label.set_visible(False) for label in ax.get_xticklabels()]
if sharey and ncols > 1:
for i, ax in enumerate(axarr):
- if (i % ncols) != 0: # only first column
+ if (i % ncols) != 0: # only first column
[label.set_visible(False) for label in ax.get_yticklabels()]
if squeeze:
# Reshape the array to have the final desired dimension (nrow,ncol),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
- if nplots==1:
+ if nplots == 1:
axes = axarr[0]
else:
axes = axarr.reshape(nrows, ncols).squeeze()
diff --git a/pandas/tools/tile.py b/pandas/tools/tile.py
index 7a2101a967942..cc4c4192cb737 100644
--- a/pandas/tools/tile.py
+++ b/pandas/tools/tile.py
@@ -66,7 +66,7 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
if not np.iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
- try: # for array-like
+ try: # for array-like
sz = x.size
except AttributeError:
x = np.asarray(x)
@@ -79,13 +79,13 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
rng = (nanops.nanmin(x), nanops.nanmax(x))
mn, mx = [mi + 0.0 for mi in rng]
- if mn == mx: # adjust end points before binning
+ if mn == mx: # adjust end points before binning
mn -= .001 * mn
mx += .001 * mx
- bins = np.linspace(mn, mx, bins+1, endpoint=True)
- else: # adjust end points after binning
- bins = np.linspace(mn, mx, bins+1, endpoint=True)
- adj = (mx - mn) * 0.001 # 0.1% of the range
+ bins = np.linspace(mn, mx, bins + 1, endpoint=True)
+ else: # adjust end points after binning
+ bins = np.linspace(mn, mx, bins + 1, endpoint=True)
+ adj = (mx - mn) * 0.001 # 0.1% of the range
if right:
bins[0] -= adj
else:
@@ -101,7 +101,6 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
include_lowest=include_lowest)
-
def qcut(x, q, labels=None, retbins=False, precision=3):
"""
Quantile-based discretization function. Discretize variable into
@@ -210,6 +209,7 @@ def _format_label(x, precision=3):
else:
return str(x)
+
def _trim_zeros(x):
while len(x) > 1 and x[-1] == '0':
x = x[:-1]
diff --git a/pandas/tseries/converter.py b/pandas/tseries/converter.py
index e3a49d03f72ad..78455a5e46259 100644
--- a/pandas/tseries/converter.py
+++ b/pandas/tseries/converter.py
@@ -20,6 +20,7 @@
from pandas.tseries.frequencies import FreqGroup
from pandas.tseries.period import Period, PeriodIndex
+
def register():
units.registry[pydt.time] = TimeConverter()
units.registry[lib.Timestamp] = DatetimeConverter()
@@ -27,11 +28,13 @@ def register():
units.registry[pydt.datetime] = DatetimeConverter()
units.registry[Period] = PeriodConverter()
+
def _to_ordinalf(tm):
tot_sec = (tm.hour * 3600 + tm.minute * 60 + tm.second +
float(tm.microsecond / 1e6))
return tot_sec
+
def time2num(d):
if isinstance(d, basestring):
parsed = tools.to_datetime(d)
@@ -42,6 +45,7 @@ def time2num(d):
return _to_ordinalf(d)
return d
+
class TimeConverter(units.ConversionInterface):
@staticmethod
@@ -69,6 +73,7 @@ def axisinfo(unit, axis):
def default_units(x, axis):
return 'time'
+
### time formatter
class TimeFormatter(Formatter):
@@ -90,6 +95,7 @@ def __call__(self, x, pos=0):
### Period Conversion
+
class PeriodConverter(dates.DateConverter):
@staticmethod
@@ -106,6 +112,7 @@ def convert(values, units, axis):
return [get_datevalue(x, axis.freq) for x in values]
return values
+
def get_datevalue(date, freq):
if isinstance(date, Period):
return date.asfreq(freq).ordinal
@@ -119,9 +126,10 @@ def get_datevalue(date, freq):
raise ValueError("Unrecognizable date '%s'" % date)
HOURS_PER_DAY = 24.
-MINUTES_PER_DAY = 60.*HOURS_PER_DAY
-SECONDS_PER_DAY = 60.*MINUTES_PER_DAY
-MUSECONDS_PER_DAY = 1e6*SECONDS_PER_DAY
+MINUTES_PER_DAY = 60. * HOURS_PER_DAY
+SECONDS_PER_DAY = 60. * MINUTES_PER_DAY
+MUSECONDS_PER_DAY = 1e6 * SECONDS_PER_DAY
+
def _dt_to_float_ordinal(dt):
"""
@@ -132,6 +140,7 @@ def _dt_to_float_ordinal(dt):
base = dates.date2num(dt)
return base
+
### Datetime Conversion
class DatetimeConverter(dates.DateConverter):
@@ -184,8 +193,8 @@ def axisinfo(unit, axis):
datemin = pydt.date(2000, 1, 1)
datemax = pydt.date(2010, 1, 1)
- return units.AxisInfo( majloc=majloc, majfmt=majfmt, label='',
- default_limits=(datemin, datemax))
+ return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
+ default_limits=(datemin, datemax))
class PandasAutoDateFormatter(dates.AutoDateFormatter):
@@ -196,23 +205,23 @@ def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
if self._tz is dates.UTC:
self._tz._utcoffset = self._tz.utcoffset(None)
self.scaled = {
- 365.0 : '%Y',
- 30. : '%b %Y',
- 1.0 : '%b %d %Y',
- 1. / 24. : '%H:%M:%S',
- 1. / 24. / 3600. / 1000. : '%H:%M:%S.%f'
+ 365.0: '%Y',
+ 30.: '%b %Y',
+ 1.0: '%b %d %Y',
+ 1. / 24.: '%H:%M:%S',
+ 1. / 24. / 3600. / 1000.: '%H:%M:%S.%f'
}
def _get_fmt(self, x):
- scale = float( self._locator._get_unit() )
+ scale = float(self._locator._get_unit())
fmt = self.defaultfmt
for k in sorted(self.scaled):
- if k >= scale:
- fmt = self.scaled[k]
- break
+ if k >= scale:
+ fmt = self.scaled[k]
+ break
return fmt
@@ -221,6 +230,7 @@ def __call__(self, x, pos=0):
self._formatter = dates.DateFormatter(fmt, self._tz)
return self._formatter(x, pos)
+
class PandasAutoDateLocator(dates.AutoDateLocator):
def get_locator(self, dmin, dmax):
@@ -245,6 +255,7 @@ def get_locator(self, dmin, dmax):
def _get_unit(self):
return MilliSecondLocator.get_unit_generic(self._freq)
+
class MilliSecondLocator(dates.DateLocator):
UNIT = 1. / (24 * 3600 * 1000)
@@ -265,10 +276,12 @@ def get_unit_generic(freq):
def __call__(self):
# if no data have been set, this will tank with a ValueError
- try: dmin, dmax = self.viewlim_to_dt()
- except ValueError: return []
+ try:
+ dmin, dmax = self.viewlim_to_dt()
+ except ValueError:
+ return []
- if dmin>dmax:
+ if dmin > dmax:
dmax, dmin = dmin, dmax
delta = relativedelta(dmax, dmin)
@@ -276,13 +289,13 @@ def __call__(self):
try:
start = dmin - delta
except ValueError:
- start = _from_ordinal( 1.0 )
+ start = _from_ordinal(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
- stop = _from_ordinal( 3652059.9999999 )
+ stop = _from_ordinal(3652059.9999999)
nmax, nmin = dates.date2num((dmax, dmin))
@@ -306,7 +319,7 @@ def __call__(self):
freq = '%dL' % self._get_interval()
tz = self.tz.tzname(None)
- st = _from_ordinal(dates.date2num(dmin)) # strip tz
+ st = _from_ordinal(dates.date2num(dmin)) # strip tz
ed = _from_ordinal(dates.date2num(dmax))
all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).asobject
@@ -328,7 +341,7 @@ def autoscale(self):
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
- if dmin>dmax:
+ if dmin > dmax:
dmax, dmin = dmin, dmax
delta = relativedelta(dmax, dmin)
@@ -343,7 +356,7 @@ def autoscale(self):
stop = dmax + delta
except ValueError:
# The magic number!
- stop = _from_ordinal( 3652059.9999999 )
+ stop = _from_ordinal(3652059.9999999)
dmin, dmax = self.datalim_to_dt()
@@ -357,18 +370,19 @@ def _from_ordinal(x, tz=None):
ix = int(x)
dt = datetime.fromordinal(ix)
remainder = float(x) - ix
- hour, remainder = divmod(24*remainder, 1)
- minute, remainder = divmod(60*remainder, 1)
- second, remainder = divmod(60*remainder, 1)
- microsecond = int(1e6*remainder)
- if microsecond<10: microsecond=0 # compensate for rounding errors
+ hour, remainder = divmod(24 * remainder, 1)
+ minute, remainder = divmod(60 * remainder, 1)
+ second, remainder = divmod(60 * remainder, 1)
+ microsecond = int(1e6 * remainder)
+ if microsecond < 10:
+ microsecond = 0 # compensate for rounding errors
dt = datetime(dt.year, dt.month, dt.day, int(hour), int(minute),
int(second), microsecond)
if tz is not None:
dt = dt.astimezone(tz)
if microsecond > 999990: # compensate for rounding errors
- dt += timedelta(microseconds = 1e6 - microsecond)
+ dt += timedelta(microseconds=1e6 - microsecond)
return dt
@@ -378,6 +392,7 @@ def _from_ordinal(x, tz=None):
#---- --- Locators ---
##### -------------------------------------------------------------------------
+
def _get_default_annual_spacing(nyears):
"""
Returns a default spacing between consecutive ticks for annual data.
@@ -399,6 +414,7 @@ def _get_default_annual_spacing(nyears):
(min_spacing, maj_spacing) = (factor * 20, factor * 100)
return (min_spacing, maj_spacing)
+
def period_break(dates, period):
"""
Returns the indices where the given period changes.
@@ -414,6 +430,7 @@ def period_break(dates, period):
previous = getattr(dates - 1, period)
return (current - previous).nonzero()[0]
+
def has_level_label(label_flags, vmin):
"""
Returns true if the ``label_flags`` indicate there is at least one label
@@ -429,6 +446,7 @@ def has_level_label(label_flags, vmin):
else:
return True
+
def _daily_finder(vmin, vmax, freq):
periodsperday = -1
@@ -439,7 +457,7 @@ def _daily_finder(vmin, vmax, freq):
periodsperday = 24 * 60
elif freq == FreqGroup.FR_HR:
periodsperday = 24
- else: # pragma: no cover
+ else: # pragma: no cover
raise ValueError("unexpected frequency: %s" % freq)
periodsperyear = 365 * periodsperday
periodspermonth = 28 * periodsperday
@@ -453,7 +471,7 @@ def _daily_finder(vmin, vmax, freq):
elif frequencies.get_freq_group(freq) == FreqGroup.FR_WK:
periodsperyear = 52
periodspermonth = 3
- else: # pragma: no cover
+ else: # pragma: no cover
raise ValueError("unexpected frequency")
# save this for later usage
@@ -489,7 +507,7 @@ def first_label(label_flags):
def _hour_finder(label_interval, force_year_start):
_hour = dates_.hour
- _prev_hour = (dates_-1).hour
+ _prev_hour = (dates_ - 1).hour
hour_start = (_hour - _prev_hour) != 0
info_maj[day_start] = True
info_min[hour_start & (_hour % label_interval == 0)] = True
@@ -503,7 +521,7 @@ def _hour_finder(label_interval, force_year_start):
def _minute_finder(label_interval):
hour_start = period_break(dates_, 'hour')
_minute = dates_.minute
- _prev_minute = (dates_-1).minute
+ _prev_minute = (dates_ - 1).minute
minute_start = (_minute - _prev_minute) != 0
info_maj[hour_start] = True
info_min[minute_start & (_minute % label_interval == 0)] = True
@@ -516,7 +534,7 @@ def _minute_finder(label_interval):
def _second_finder(label_interval):
minute_start = period_break(dates_, 'minute')
_second = dates_.second
- _prev_second = (dates_-1).second
+ _prev_second = (dates_ - 1).second
second_start = (_second - _prev_second) != 0
info['maj'][minute_start] = True
info['min'][second_start & (_second % label_interval == 0)] = True
@@ -748,6 +766,7 @@ def _quarterly_finder(vmin, vmax, freq):
#..............
return info
+
def _annual_finder(vmin, vmax, freq):
(vmin, vmax) = (int(vmin), int(vmax + 1))
span = vmax - vmin + 1
@@ -767,6 +786,7 @@ def _annual_finder(vmin, vmax, freq):
#..............
return info
+
def get_finder(freq):
if isinstance(freq, basestring):
freq = frequencies.get_freq(freq)
@@ -776,14 +796,15 @@ def get_finder(freq):
return _annual_finder
elif fgroup == FreqGroup.FR_QTR:
return _quarterly_finder
- elif freq ==FreqGroup.FR_MTH:
+ elif freq == FreqGroup.FR_MTH:
return _monthly_finder
elif ((freq >= FreqGroup.FR_BUS) or fgroup == FreqGroup.FR_WK):
return _daily_finder
- else: # pragma: no cover
+ else: # pragma: no cover
errmsg = "Unsupported frequency: %s" % (freq)
raise NotImplementedError(errmsg)
+
class TimeSeries_DateLocator(Locator):
"""
Locates the ticks along an axis controlled by a :class:`Series`.
@@ -839,7 +860,7 @@ def __call__(self):
vmin, vmax = vmax, vmin
if self.isdynamic:
locs = self._get_default_locs(vmin, vmax)
- else: # pragma: no cover
+ else: # pragma: no cover
base = self.base
(d, m) = divmod(vmin, base)
vmin = (d + 1) * base
@@ -921,11 +942,10 @@ def set_locs(self, locs):
if vmax < vmin:
(vmin, vmax) = (vmax, vmin)
self._set_default_format(vmin, vmax)
- #
+
def __call__(self, x, pos=0):
if self.formatdict is None:
return ''
else:
fmt = self.formatdict.pop(x, '')
return Period(ordinal=int(x), freq=self.freq).strftime(fmt)
-
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 77f03dc4d1279..b9b2d28e1595a 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -9,6 +9,7 @@
import pandas.core.common as com
import pandas.lib as lib
+
class FreqGroup(object):
FR_ANN = 1000
FR_QTR = 2000
@@ -20,6 +21,7 @@ class FreqGroup(object):
FR_MIN = 8000
FR_SEC = 9000
+
def get_to_timestamp_base(base):
if base <= FreqGroup.FR_WK:
return FreqGroup.FR_DAY
@@ -27,18 +29,21 @@ def get_to_timestamp_base(base):
return FreqGroup.FR_SEC
return base
+
def get_freq_group(freq):
if isinstance(freq, basestring):
base, mult = get_freq_code(freq)
freq = base
return (freq // 1000) * 1000
+
def get_freq(freq):
if isinstance(freq, basestring):
base, mult = get_freq_code(freq)
freq = base
return freq
+
def get_freq_code(freqstr):
"""
@@ -93,171 +98,171 @@ def _get_freq_str(base, mult=1):
QuarterEnd, BQuarterBegin, BQuarterEnd)
_offset_map = {
- 'D' : Day(),
- 'B' : BDay(),
- 'H' : Hour(),
- 'T' : Minute(),
- 'S' : Second(),
- 'L' : Milli(),
- 'U' : Micro(),
- None : None,
+ 'D': Day(),
+ 'B': BDay(),
+ 'H': Hour(),
+ 'T': Minute(),
+ 'S': Second(),
+ 'L': Milli(),
+ 'U': Micro(),
+ None: None,
# Monthly - Calendar
- 'M' : MonthEnd(),
- 'MS' : MonthBegin(),
+ 'M': MonthEnd(),
+ 'MS': MonthBegin(),
# Monthly - Business
- 'BM' : BMonthEnd(),
- 'BMS' : BMonthBegin(),
+ 'BM': BMonthEnd(),
+ 'BMS': BMonthBegin(),
# Annual - Calendar
- 'A-JAN' : YearEnd(month=1),
- 'A-FEB' : YearEnd(month=2),
- 'A-MAR' : YearEnd(month=3),
- 'A-APR' : YearEnd(month=4),
- 'A-MAY' : YearEnd(month=5),
- 'A-JUN' : YearEnd(month=6),
- 'A-JUL' : YearEnd(month=7),
- 'A-AUG' : YearEnd(month=8),
- 'A-SEP' : YearEnd(month=9),
- 'A-OCT' : YearEnd(month=10),
- 'A-NOV' : YearEnd(month=11),
- 'A-DEC' : YearEnd(month=12),
+ 'A-JAN': YearEnd(month=1),
+ 'A-FEB': YearEnd(month=2),
+ 'A-MAR': YearEnd(month=3),
+ 'A-APR': YearEnd(month=4),
+ 'A-MAY': YearEnd(month=5),
+ 'A-JUN': YearEnd(month=6),
+ 'A-JUL': YearEnd(month=7),
+ 'A-AUG': YearEnd(month=8),
+ 'A-SEP': YearEnd(month=9),
+ 'A-OCT': YearEnd(month=10),
+ 'A-NOV': YearEnd(month=11),
+ 'A-DEC': YearEnd(month=12),
# Annual - Calendar (start)
- 'AS-JAN' : YearBegin(month=1),
- 'AS-FEB' : YearBegin(month=2),
- 'AS-MAR' : YearBegin(month=3),
- 'AS-APR' : YearBegin(month=4),
- 'AS-MAY' : YearBegin(month=5),
- 'AS-JUN' : YearBegin(month=6),
- 'AS-JUL' : YearBegin(month=7),
- 'AS-AUG' : YearBegin(month=8),
- 'AS-SEP' : YearBegin(month=9),
- 'AS-OCT' : YearBegin(month=10),
- 'AS-NOV' : YearBegin(month=11),
- 'AS-DEC' : YearBegin(month=12),
+ 'AS-JAN': YearBegin(month=1),
+ 'AS-FEB': YearBegin(month=2),
+ 'AS-MAR': YearBegin(month=3),
+ 'AS-APR': YearBegin(month=4),
+ 'AS-MAY': YearBegin(month=5),
+ 'AS-JUN': YearBegin(month=6),
+ 'AS-JUL': YearBegin(month=7),
+ 'AS-AUG': YearBegin(month=8),
+ 'AS-SEP': YearBegin(month=9),
+ 'AS-OCT': YearBegin(month=10),
+ 'AS-NOV': YearBegin(month=11),
+ 'AS-DEC': YearBegin(month=12),
# Annual - Business
- 'BA-JAN' : BYearEnd(month=1),
- 'BA-FEB' : BYearEnd(month=2),
- 'BA-MAR' : BYearEnd(month=3),
- 'BA-APR' : BYearEnd(month=4),
- 'BA-MAY' : BYearEnd(month=5),
- 'BA-JUN' : BYearEnd(month=6),
- 'BA-JUL' : BYearEnd(month=7),
- 'BA-AUG' : BYearEnd(month=8),
- 'BA-SEP' : BYearEnd(month=9),
- 'BA-OCT' : BYearEnd(month=10),
- 'BA-NOV' : BYearEnd(month=11),
- 'BA-DEC' : BYearEnd(month=12),
+ 'BA-JAN': BYearEnd(month=1),
+ 'BA-FEB': BYearEnd(month=2),
+ 'BA-MAR': BYearEnd(month=3),
+ 'BA-APR': BYearEnd(month=4),
+ 'BA-MAY': BYearEnd(month=5),
+ 'BA-JUN': BYearEnd(month=6),
+ 'BA-JUL': BYearEnd(month=7),
+ 'BA-AUG': BYearEnd(month=8),
+ 'BA-SEP': BYearEnd(month=9),
+ 'BA-OCT': BYearEnd(month=10),
+ 'BA-NOV': BYearEnd(month=11),
+ 'BA-DEC': BYearEnd(month=12),
# Annual - Business (Start)
- 'BAS-JAN' : BYearBegin(month=1),
- 'BAS-FEB' : BYearBegin(month=2),
- 'BAS-MAR' : BYearBegin(month=3),
- 'BAS-APR' : BYearBegin(month=4),
- 'BAS-MAY' : BYearBegin(month=5),
- 'BAS-JUN' : BYearBegin(month=6),
- 'BAS-JUL' : BYearBegin(month=7),
- 'BAS-AUG' : BYearBegin(month=8),
- 'BAS-SEP' : BYearBegin(month=9),
- 'BAS-OCT' : BYearBegin(month=10),
- 'BAS-NOV' : BYearBegin(month=11),
- 'BAS-DEC' : BYearBegin(month=12),
+ 'BAS-JAN': BYearBegin(month=1),
+ 'BAS-FEB': BYearBegin(month=2),
+ 'BAS-MAR': BYearBegin(month=3),
+ 'BAS-APR': BYearBegin(month=4),
+ 'BAS-MAY': BYearBegin(month=5),
+ 'BAS-JUN': BYearBegin(month=6),
+ 'BAS-JUL': BYearBegin(month=7),
+ 'BAS-AUG': BYearBegin(month=8),
+ 'BAS-SEP': BYearBegin(month=9),
+ 'BAS-OCT': BYearBegin(month=10),
+ 'BAS-NOV': BYearBegin(month=11),
+ 'BAS-DEC': BYearBegin(month=12),
# Quarterly - Calendar
# 'Q' : QuarterEnd(startingMonth=3),
- 'Q-JAN' : QuarterEnd(startingMonth=1),
- 'Q-FEB' : QuarterEnd(startingMonth=2),
- 'Q-MAR' : QuarterEnd(startingMonth=3),
- 'Q-APR' : QuarterEnd(startingMonth=4),
- 'Q-MAY' : QuarterEnd(startingMonth=5),
- 'Q-JUN' : QuarterEnd(startingMonth=6),
- 'Q-JUL' : QuarterEnd(startingMonth=7),
- 'Q-AUG' : QuarterEnd(startingMonth=8),
- 'Q-SEP' : QuarterEnd(startingMonth=9),
- 'Q-OCT' : QuarterEnd(startingMonth=10),
- 'Q-NOV' : QuarterEnd(startingMonth=11),
- 'Q-DEC' : QuarterEnd(startingMonth=12),
+ 'Q-JAN': QuarterEnd(startingMonth=1),
+ 'Q-FEB': QuarterEnd(startingMonth=2),
+ 'Q-MAR': QuarterEnd(startingMonth=3),
+ 'Q-APR': QuarterEnd(startingMonth=4),
+ 'Q-MAY': QuarterEnd(startingMonth=5),
+ 'Q-JUN': QuarterEnd(startingMonth=6),
+ 'Q-JUL': QuarterEnd(startingMonth=7),
+ 'Q-AUG': QuarterEnd(startingMonth=8),
+ 'Q-SEP': QuarterEnd(startingMonth=9),
+ 'Q-OCT': QuarterEnd(startingMonth=10),
+ 'Q-NOV': QuarterEnd(startingMonth=11),
+ 'Q-DEC': QuarterEnd(startingMonth=12),
# Quarterly - Calendar (Start)
- 'QS' : QuarterBegin(startingMonth=1),
- 'QS-JAN' : QuarterBegin(startingMonth=1),
- 'QS-FEB' : QuarterBegin(startingMonth=2),
- 'QS-MAR' : QuarterBegin(startingMonth=3),
- 'QS-APR' : QuarterBegin(startingMonth=4),
- 'QS-MAY' : QuarterBegin(startingMonth=5),
- 'QS-JUN' : QuarterBegin(startingMonth=6),
- 'QS-JUL' : QuarterBegin(startingMonth=7),
- 'QS-AUG' : QuarterBegin(startingMonth=8),
- 'QS-SEP' : QuarterBegin(startingMonth=9),
- 'QS-OCT' : QuarterBegin(startingMonth=10),
- 'QS-NOV' : QuarterBegin(startingMonth=11),
- 'QS-DEC' : QuarterBegin(startingMonth=12),
+ 'QS': QuarterBegin(startingMonth=1),
+ 'QS-JAN': QuarterBegin(startingMonth=1),
+ 'QS-FEB': QuarterBegin(startingMonth=2),
+ 'QS-MAR': QuarterBegin(startingMonth=3),
+ 'QS-APR': QuarterBegin(startingMonth=4),
+ 'QS-MAY': QuarterBegin(startingMonth=5),
+ 'QS-JUN': QuarterBegin(startingMonth=6),
+ 'QS-JUL': QuarterBegin(startingMonth=7),
+ 'QS-AUG': QuarterBegin(startingMonth=8),
+ 'QS-SEP': QuarterBegin(startingMonth=9),
+ 'QS-OCT': QuarterBegin(startingMonth=10),
+ 'QS-NOV': QuarterBegin(startingMonth=11),
+ 'QS-DEC': QuarterBegin(startingMonth=12),
# Quarterly - Business
- 'BQ-JAN' : BQuarterEnd(startingMonth=1),
- 'BQ-FEB' : BQuarterEnd(startingMonth=2),
- 'BQ-MAR' : BQuarterEnd(startingMonth=3),
-
- 'BQ' : BQuarterEnd(startingMonth=12),
- 'BQ-APR' : BQuarterEnd(startingMonth=4),
- 'BQ-MAY' : BQuarterEnd(startingMonth=5),
- 'BQ-JUN' : BQuarterEnd(startingMonth=6),
- 'BQ-JUL' : BQuarterEnd(startingMonth=7),
- 'BQ-AUG' : BQuarterEnd(startingMonth=8),
- 'BQ-SEP' : BQuarterEnd(startingMonth=9),
- 'BQ-OCT' : BQuarterEnd(startingMonth=10),
- 'BQ-NOV' : BQuarterEnd(startingMonth=11),
- 'BQ-DEC' : BQuarterEnd(startingMonth=12),
+ 'BQ-JAN': BQuarterEnd(startingMonth=1),
+ 'BQ-FEB': BQuarterEnd(startingMonth=2),
+ 'BQ-MAR': BQuarterEnd(startingMonth=3),
+
+ 'BQ': BQuarterEnd(startingMonth=12),
+ 'BQ-APR': BQuarterEnd(startingMonth=4),
+ 'BQ-MAY': BQuarterEnd(startingMonth=5),
+ 'BQ-JUN': BQuarterEnd(startingMonth=6),
+ 'BQ-JUL': BQuarterEnd(startingMonth=7),
+ 'BQ-AUG': BQuarterEnd(startingMonth=8),
+ 'BQ-SEP': BQuarterEnd(startingMonth=9),
+ 'BQ-OCT': BQuarterEnd(startingMonth=10),
+ 'BQ-NOV': BQuarterEnd(startingMonth=11),
+ 'BQ-DEC': BQuarterEnd(startingMonth=12),
# Quarterly - Business (Start)
- 'BQS-JAN' : BQuarterBegin(startingMonth=1),
- 'BQS' : BQuarterBegin(startingMonth=1),
- 'BQS-FEB' : BQuarterBegin(startingMonth=2),
- 'BQS-MAR' : BQuarterBegin(startingMonth=3),
- 'BQS-APR' : BQuarterBegin(startingMonth=4),
- 'BQS-MAY' : BQuarterBegin(startingMonth=5),
- 'BQS-JUN' : BQuarterBegin(startingMonth=6),
- 'BQS-JUL' : BQuarterBegin(startingMonth=7),
- 'BQS-AUG' : BQuarterBegin(startingMonth=8),
- 'BQS-SEP' : BQuarterBegin(startingMonth=9),
- 'BQS-OCT' : BQuarterBegin(startingMonth=10),
- 'BQS-NOV' : BQuarterBegin(startingMonth=11),
- 'BQS-DEC' : BQuarterBegin(startingMonth=12),
+ 'BQS-JAN': BQuarterBegin(startingMonth=1),
+ 'BQS': BQuarterBegin(startingMonth=1),
+ 'BQS-FEB': BQuarterBegin(startingMonth=2),
+ 'BQS-MAR': BQuarterBegin(startingMonth=3),
+ 'BQS-APR': BQuarterBegin(startingMonth=4),
+ 'BQS-MAY': BQuarterBegin(startingMonth=5),
+ 'BQS-JUN': BQuarterBegin(startingMonth=6),
+ 'BQS-JUL': BQuarterBegin(startingMonth=7),
+ 'BQS-AUG': BQuarterBegin(startingMonth=8),
+ 'BQS-SEP': BQuarterBegin(startingMonth=9),
+ 'BQS-OCT': BQuarterBegin(startingMonth=10),
+ 'BQS-NOV': BQuarterBegin(startingMonth=11),
+ 'BQS-DEC': BQuarterBegin(startingMonth=12),
# Weekly
- 'W-MON' : Week(weekday=0),
- 'W-TUE' : Week(weekday=1),
- 'W-WED' : Week(weekday=2),
- 'W-THU' : Week(weekday=3),
- 'W-FRI' : Week(weekday=4),
- 'W-SAT' : Week(weekday=5),
- 'W-SUN' : Week(weekday=6),
+ 'W-MON': Week(weekday=0),
+ 'W-TUE': Week(weekday=1),
+ 'W-WED': Week(weekday=2),
+ 'W-THU': Week(weekday=3),
+ 'W-FRI': Week(weekday=4),
+ 'W-SAT': Week(weekday=5),
+ 'W-SUN': Week(weekday=6),
}
_offset_to_period_map = {
- 'WEEKDAY' : 'D',
- 'EOM' : 'M',
- 'BM' : 'M',
- 'BQS' : 'Q',
- 'QS' : 'Q',
- 'BQ' : 'Q',
- 'BA' : 'A',
- 'AS' : 'A',
- 'BAS' : 'A',
- 'MS' : 'M',
- 'D' : 'D',
- 'B' : 'B',
- 'T' : 'T',
- 'S' : 'S',
- 'H' : 'H',
- 'Q' : 'Q',
- 'A' : 'A',
- 'W' : 'W',
- 'M' : 'M'
+ 'WEEKDAY': 'D',
+ 'EOM': 'M',
+ 'BM': 'M',
+ 'BQS': 'Q',
+ 'QS': 'Q',
+ 'BQ': 'Q',
+ 'BA': 'A',
+ 'AS': 'A',
+ 'BAS': 'A',
+ 'MS': 'M',
+ 'D': 'D',
+ 'B': 'B',
+ 'T': 'T',
+ 'S': 'S',
+ 'H': 'H',
+ 'Q': 'Q',
+ 'A': 'A',
+ 'W': 'W',
+ 'M': 'M'
}
need_suffix = ['QS', 'BQ', 'BQS', 'AS', 'BA', 'BAS']
@@ -276,6 +281,7 @@ def _get_freq_str(base, mult=1):
for _d in _days:
_offset_to_period_map['W-%s' % _d] = 'W-%s' % _d
+
def get_period_alias(offset_str):
""" alias to closest period strings BQ->Q etc"""
return _offset_to_period_map.get(offset_str, None)
@@ -299,25 +305,25 @@ def get_period_alias(offset_str):
'Q@JAN': 'BQ-JAN',
'Q@FEB': 'BQ-FEB',
'Q@MAR': 'BQ-MAR',
- 'Q' : 'Q-DEC',
-
- 'A' : 'A-DEC', # YearEnd(month=12),
- 'AS' : 'AS-JAN', # YearBegin(month=1),
- 'BA' : 'BA-DEC', # BYearEnd(month=12),
- 'BAS' : 'BAS-JAN', # BYearBegin(month=1),
-
- 'A@JAN' : 'BA-JAN',
- 'A@FEB' : 'BA-FEB',
- 'A@MAR' : 'BA-MAR',
- 'A@APR' : 'BA-APR',
- 'A@MAY' : 'BA-MAY',
- 'A@JUN' : 'BA-JUN',
- 'A@JUL' : 'BA-JUL',
- 'A@AUG' : 'BA-AUG',
- 'A@SEP' : 'BA-SEP',
- 'A@OCT' : 'BA-OCT',
- 'A@NOV' : 'BA-NOV',
- 'A@DEC' : 'BA-DEC',
+ 'Q': 'Q-DEC',
+
+ 'A': 'A-DEC', # YearEnd(month=12),
+ 'AS': 'AS-JAN', # YearBegin(month=1),
+ 'BA': 'BA-DEC', # BYearEnd(month=12),
+ 'BAS': 'BAS-JAN', # BYearBegin(month=1),
+
+ 'A@JAN': 'BA-JAN',
+ 'A@FEB': 'BA-FEB',
+ 'A@MAR': 'BA-MAR',
+ 'A@APR': 'BA-APR',
+ 'A@MAY': 'BA-MAY',
+ 'A@JUN': 'BA-JUN',
+ 'A@JUL': 'BA-JUL',
+ 'A@AUG': 'BA-AUG',
+ 'A@SEP': 'BA-SEP',
+ 'A@OCT': 'BA-OCT',
+ 'A@NOV': 'BA-NOV',
+ 'A@DEC': 'BA-DEC',
# lite aliases
'Min': 'T',
@@ -407,6 +413,7 @@ def to_offset(freqstr):
# hack to handle WOM-1MON
opattern = re.compile(r'([\-]?\d*)\s*([A-Za-z]+([\-@]\d*[A-Za-z]+)?)')
+
def _base_and_stride(freqstr):
"""
Return base freq and stride info from string representation
@@ -431,6 +438,7 @@ def _base_and_stride(freqstr):
return (base, stride)
+
def get_base_alias(freqstr):
"""
Returns the base frequency alias, e.g., '5D' -> 'D'
@@ -473,6 +481,7 @@ def get_offset(name):
def hasOffsetName(offset):
return offset in _offset_names
+
def get_offset_name(offset):
"""
Return rule name associated with a DateOffset object
@@ -488,6 +497,7 @@ def get_offset_name(offset):
else:
raise Exception('Bad rule given: %s!' % offset)
+
def get_legacy_offset_name(offset):
"""
Return the pre pandas 0.8.0 name for the date offset
@@ -497,6 +507,7 @@ def get_legacy_offset_name(offset):
get_offset_name = get_offset_name
+
def get_standard_freq(freq):
"""
Return the standardized frequency string
@@ -518,49 +529,49 @@ def get_standard_freq(freq):
_period_code_map = {
# Annual freqs with various fiscal year ends.
# eg, 2005 for A-FEB runs Mar 1, 2004 to Feb 28, 2005
- "A-DEC" : 1000, # Annual - December year end
- "A-JAN" : 1001, # Annual - January year end
- "A-FEB" : 1002, # Annual - February year end
- "A-MAR" : 1003, # Annual - March year end
- "A-APR" : 1004, # Annual - April year end
- "A-MAY" : 1005, # Annual - May year end
- "A-JUN" : 1006, # Annual - June year end
- "A-JUL" : 1007, # Annual - July year end
- "A-AUG" : 1008, # Annual - August year end
- "A-SEP" : 1009, # Annual - September year end
- "A-OCT" : 1010, # Annual - October year end
- "A-NOV" : 1011, # Annual - November year end
+ "A-DEC": 1000, # Annual - December year end
+ "A-JAN": 1001, # Annual - January year end
+ "A-FEB": 1002, # Annual - February year end
+ "A-MAR": 1003, # Annual - March year end
+ "A-APR": 1004, # Annual - April year end
+ "A-MAY": 1005, # Annual - May year end
+ "A-JUN": 1006, # Annual - June year end
+ "A-JUL": 1007, # Annual - July year end
+ "A-AUG": 1008, # Annual - August year end
+ "A-SEP": 1009, # Annual - September year end
+ "A-OCT": 1010, # Annual - October year end
+ "A-NOV": 1011, # Annual - November year end
# Quarterly frequencies with various fiscal year ends.
# eg, Q42005 for Q-OCT runs Aug 1, 2005 to Oct 31, 2005
- "Q-DEC" : 2000 , # Quarterly - December year end
- "Q-JAN" : 2001, # Quarterly - January year end
- "Q-FEB" : 2002, # Quarterly - February year end
- "Q-MAR" : 2003, # Quarterly - March year end
- "Q-APR" : 2004, # Quarterly - April year end
- "Q-MAY" : 2005, # Quarterly - May year end
- "Q-JUN" : 2006, # Quarterly - June year end
- "Q-JUL" : 2007, # Quarterly - July year end
- "Q-AUG" : 2008, # Quarterly - August year end
- "Q-SEP" : 2009, # Quarterly - September year end
- "Q-OCT" : 2010, # Quarterly - October year end
- "Q-NOV" : 2011, # Quarterly - November year end
-
- "M" : 3000, # Monthly
-
- "W-SUN" : 4000, # Weekly - Sunday end of week
- "W-MON" : 4001, # Weekly - Monday end of week
- "W-TUE" : 4002, # Weekly - Tuesday end of week
- "W-WED" : 4003, # Weekly - Wednesday end of week
- "W-THU" : 4004, # Weekly - Thursday end of week
- "W-FRI" : 4005, # Weekly - Friday end of week
- "W-SAT" : 4006, # Weekly - Saturday end of week
-
- "B" : 5000, # Business days
- "D" : 6000, # Daily
- "H" : 7000, # Hourly
- "T" : 8000, # Minutely
- "S" : 9000, # Secondly
+ "Q-DEC": 2000, # Quarterly - December year end
+ "Q-JAN": 2001, # Quarterly - January year end
+ "Q-FEB": 2002, # Quarterly - February year end
+ "Q-MAR": 2003, # Quarterly - March year end
+ "Q-APR": 2004, # Quarterly - April year end
+ "Q-MAY": 2005, # Quarterly - May year end
+ "Q-JUN": 2006, # Quarterly - June year end
+ "Q-JUL": 2007, # Quarterly - July year end
+ "Q-AUG": 2008, # Quarterly - August year end
+ "Q-SEP": 2009, # Quarterly - September year end
+ "Q-OCT": 2010, # Quarterly - October year end
+ "Q-NOV": 2011, # Quarterly - November year end
+
+ "M": 3000, # Monthly
+
+ "W-SUN": 4000, # Weekly - Sunday end of week
+ "W-MON": 4001, # Weekly - Monday end of week
+ "W-TUE": 4002, # Weekly - Tuesday end of week
+ "W-WED": 4003, # Weekly - Wednesday end of week
+ "W-THU": 4004, # Weekly - Thursday end of week
+ "W-FRI": 4005, # Weekly - Friday end of week
+ "W-SAT": 4006, # Weekly - Saturday end of week
+
+ "B": 5000, # Business days
+ "D": 6000, # Daily
+ "H": 7000, # Hourly
+ "T": 8000, # Minutely
+ "S": 9000, # Secondly
}
_reverse_period_code_map = {}
@@ -569,11 +580,12 @@ def get_standard_freq(freq):
# Additional aliases
_period_code_map.update({
- "Q" : 2000, # Quarterly - December year end (default quarterly)
- "A" : 1000, # Annual
- "W" : 4000, # Weekly
+ "Q": 2000, # Quarterly - December year end (default quarterly)
+ "A": 1000, # Annual
+ "W": 4000, # Weekly
})
+
def _period_alias_dictionary():
"""
Build freq alias dictionary to support freqs from original c_dates.c file
@@ -613,18 +625,18 @@ def _period_alias_dictionary():
"QTR-E", "QUARTER-E", "QUARTERLY-E"]
month_names = [
- [ "DEC", "DECEMBER" ],
- [ "JAN", "JANUARY" ],
- [ "FEB", "FEBRUARY" ],
- [ "MAR", "MARCH" ],
- [ "APR", "APRIL" ],
- [ "MAY", "MAY" ],
- [ "JUN", "JUNE" ],
- [ "JUL", "JULY" ],
- [ "AUG", "AUGUST" ],
- [ "SEP", "SEPTEMBER" ],
- [ "OCT", "OCTOBER" ],
- [ "NOV", "NOVEMBER" ] ]
+ ["DEC", "DECEMBER"],
+ ["JAN", "JANUARY"],
+ ["FEB", "FEBRUARY"],
+ ["MAR", "MARCH"],
+ ["APR", "APRIL"],
+ ["MAY", "MAY"],
+ ["JUN", "JUNE"],
+ ["JUL", "JULY"],
+ ["AUG", "AUGUST"],
+ ["SEP", "SEPTEMBER"],
+ ["OCT", "OCTOBER"],
+ ["NOV", "NOVEMBER"]]
seps = ["@", "-"]
@@ -647,13 +659,13 @@ def _period_alias_dictionary():
W_prefixes = ["W", "WK", "WEEK", "WEEKLY"]
day_names = [
- [ "SUN", "SUNDAY" ],
- [ "MON", "MONDAY" ],
- [ "TUE", "TUESDAY" ],
- [ "WED", "WEDNESDAY" ],
- [ "THU", "THURSDAY" ],
- [ "FRI", "FRIDAY" ],
- [ "SAT", "SATURDAY" ] ]
+ ["SUN", "SUNDAY"],
+ ["MON", "MONDAY"],
+ ["TUE", "TUESDAY"],
+ ["WED", "WEDNESDAY"],
+ ["THU", "THURSDAY"],
+ ["FRI", "FRIDAY"],
+ ["SAT", "SATURDAY"]]
for k in W_prefixes:
alias_dict[k] = 'W'
@@ -666,24 +678,27 @@ def _period_alias_dictionary():
return alias_dict
_reso_period_map = {
- "year" : "A",
- "quarter" : "Q",
- "month" : "M",
- "day" : "D",
- "hour" : "H",
- "minute" : "T",
- "second" : "S",
+ "year": "A",
+ "quarter": "Q",
+ "month": "M",
+ "day": "D",
+ "hour": "H",
+ "minute": "T",
+ "second": "S",
}
+
def _infer_period_group(freqstr):
return _period_group(_reso_period_map[freqstr])
+
def _period_group(freqstr):
base, mult = get_freq_code(freqstr)
return base // 1000 * 1000
_period_alias_dict = _period_alias_dictionary()
+
def _period_str_to_code(freqstr):
# hack
freqstr = _rule_aliases.get(freqstr, freqstr)
@@ -697,7 +712,6 @@ def _period_str_to_code(freqstr):
return _period_code_map[alias]
-
def infer_freq(index, warn=True):
"""
Infer the most likely frequency given the input index. If the frequency is
@@ -727,6 +741,7 @@ def infer_freq(index, warn=True):
_ONE_HOUR = 60 * _ONE_MINUTE
_ONE_DAY = 24 * _ONE_HOUR
+
class _FrequencyInferer(object):
"""
Not sure if I can avoid the state machine here
@@ -853,7 +868,7 @@ def _infer_daily_rule(self):
quarterly_rule = self._get_quarterly_rule()
if quarterly_rule:
nquarters = self.mdiffs[0] / 3
- mod_dict = {0 : 12, 2 : 11, 1 : 10}
+ mod_dict = {0: 12, 2: 11, 1: 10}
month = _month_aliases[mod_dict[self.rep_stamp.month % 3]]
return _maybe_add_count('%s-%s' % (quarterly_rule, month),
nquarters)
@@ -907,12 +922,14 @@ def _get_monthly_rule(self):
import pandas.core.algorithms as algos
+
def _maybe_add_count(base, count):
if count > 1:
return '%d%s' % (count, base)
else:
return base
+
def is_subperiod(source, target):
"""
Returns True if downsampling is possible between source and target
@@ -959,6 +976,7 @@ def is_subperiod(source, target):
elif target == 'S':
return source in ['S']
+
def is_superperiod(source, target):
"""
Returns True if upsampling is possible between source and target
@@ -1009,6 +1027,7 @@ def is_superperiod(source, target):
elif source == 'S':
return target in ['S']
+
def _get_rule_month(source, default='DEC'):
source = source.upper()
if '-' not in source:
@@ -1016,15 +1035,18 @@ def _get_rule_month(source, default='DEC'):
else:
return source.split('-')[1]
+
def _is_annual(rule):
rule = rule.upper()
return rule == 'A' or rule.startswith('A-')
+
def _quarter_months_conform(source, target):
snum = _month_numbers[source]
tnum = _month_numbers[target]
return snum % 3 == tnum % 3
+
def _is_quarterly(rule):
rule = rule.upper()
return rule == 'Q' or rule.startswith('Q-')
@@ -1043,9 +1065,9 @@ def _is_weekly(rule):
_month_numbers = dict((k, i) for i, k in enumerate(MONTHS))
-
_weekday_rule_aliases = dict((k, v) for k, v in enumerate(DAYS))
_month_aliases = dict((k + 1, v) for k, v in enumerate(MONTHS))
+
def _is_multiple(us, mult):
return us % mult == 0
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 6d5fd6f560ffe..597f3573d52dd 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -20,6 +20,7 @@
import pandas.lib as lib
import pandas._algos as _algos
+
def _utc():
import pytz
return pytz.utc
@@ -38,6 +39,7 @@ def f(self):
f.__name__ = name
return property(f)
+
def _join_i8_wrapper(joinf, with_indexers=True):
@staticmethod
def wrapper(left, right):
@@ -73,6 +75,7 @@ def wrapper(self, other):
return wrapper
+
def _ensure_datetime64(other):
if isinstance(other, np.datetime64):
return other
@@ -87,6 +90,7 @@ class TimeSeriesError(Exception):
_NS_DTYPE = np.dtype('M8[ns]')
_INT64_DTYPE = np.dtype(np.int64)
+
class DatetimeIndex(Int64Index):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
@@ -115,8 +119,8 @@ class DatetimeIndex(Int64Index):
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
- _left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
- _left_indexer_unique = _join_i8_wrapper(
+ _left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
+ _left_indexer_unique = _join_i8_wrapper(
_algos.left_join_indexer_unique_int64, with_indexers=False)
_arrmap = None
@@ -308,7 +312,6 @@ def _generate(cls, start, end, periods, name, offset,
else:
_normalized = _normalized and end.time() == _midnight
-
if hasattr(offset, 'delta') and offset != offsets.Day():
if inferred_tz is None and tz is not None:
# naive dates
@@ -325,7 +328,6 @@ def _generate(cls, start, end, periods, name, offset,
if end.tz is None and start.tz is not None:
end = end.tz_localize(start.tz)
-
if (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end)):
@@ -842,9 +844,11 @@ def _maybe_utc_convert(self, other):
if isinstance(other, DatetimeIndex):
if self.tz is not None:
if other.tz is None:
- raise Exception('Cannot join tz-naive with tz-aware DatetimeIndex')
+ raise Exception('Cannot join tz-naive with tz-aware '
+ 'DatetimeIndex')
elif other.tz is not None:
- raise Exception('Cannot join tz-naive with tz-aware DatetimeIndex')
+ raise Exception('Cannot join tz-naive with tz-aware '
+ 'DatetimeIndex')
if self.tz != other.tz:
this = self.tz_convert('UTC')
@@ -920,7 +924,7 @@ def _fast_union(self, other):
freq=left.offset)
def __array_finalize__(self, obj):
- if self.ndim == 0: # pragma: no cover
+ if self.ndim == 0: # pragma: no cover
return self.item()
self.offset = getattr(obj, 'offset', None)
@@ -978,8 +982,8 @@ def intersection(self, other):
def _partial_date_slice(self, reso, parsed):
if not self.is_monotonic:
- raise TimeSeriesError('Partial indexing only valid for ordered time'
- ' series')
+ raise TimeSeriesError('Partial indexing only valid for ordered '
+ 'time series.')
if reso == 'year':
t1 = Timestamp(datetime(parsed.year, 1, 1))
@@ -989,7 +993,7 @@ def _partial_date_slice(self, reso, parsed):
t1 = Timestamp(datetime(parsed.year, parsed.month, 1))
t2 = Timestamp(datetime(parsed.year, parsed.month, d))
elif reso == 'quarter':
- qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead
+ qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead
d = lib.monthrange(parsed.year, qe)[1] # at end of month
t1 = Timestamp(datetime(parsed.year, parsed.month, 1))
t2 = Timestamp(datetime(parsed.year, qe, d))
@@ -1361,7 +1365,6 @@ def indexer_between_time(self, start_time, end_time, include_start=True,
start_micros = _time_to_micros(start_time)
end_micros = _time_to_micros(end_time)
-
if include_start and include_end:
lop = rop = operator.le
elif include_start:
@@ -1523,10 +1526,10 @@ def _to_m8(key):
return np.int64(lib.pydt_to_i8(key)).view(_NS_DTYPE)
-
def _str_to_dt_array(arr, offset=None, dayfirst=None, yearfirst=None):
def parser(x):
- result = parse_time_string(x, offset, dayfirst=dayfirst, yearfirst=None)
+ result = parse_time_string(x, offset, dayfirst=dayfirst,
+ yearfirst=None)
return result[0]
arr = np.asarray(arr, dtype=object)
@@ -1535,7 +1538,7 @@ def parser(x):
_CACHE_START = Timestamp(datetime(1950, 1, 1))
-_CACHE_END = Timestamp(datetime(2030, 1, 1))
+_CACHE_END = Timestamp(datetime(2030, 1, 1))
_daterange_cache = {}
@@ -1548,13 +1551,16 @@ def _naive_in_cache_range(start, end):
return False
return _in_range(start, end, _CACHE_START, _CACHE_END)
+
def _in_range(start, end, rng_start, rng_end):
return start > rng_start and end < rng_end
+
def _time_to_micros(time):
seconds = time.hour * 60 * 60 + 60 * time.minute + time.second
return 1000000 * seconds + time.microsecond
+
def _utc_naive(dt):
if dt is None:
return dt
diff --git a/pandas/tseries/interval.py b/pandas/tseries/interval.py
index 58c16dcf08aca..104e088ee4e84 100644
--- a/pandas/tseries/interval.py
+++ b/pandas/tseries/interval.py
@@ -2,6 +2,7 @@
from pandas.core.index import Index
+
class Interval(object):
"""
Represents an interval of time defined by two timestamps
@@ -11,6 +12,7 @@ def __init__(self, start, end):
self.start = start
self.end = end
+
class PeriodInterval(object):
"""
Represents an interval of time defined by two Period objects (time ordinals)
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 05861d93717e5..1e3c17b7ec5ac 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -164,7 +164,7 @@ def __sub__(self, other):
raise TypeError('Cannot subtract datetime from offset!')
elif type(other) == type(self):
return self.__class__(self.n - other.n, **self.kwds)
- else: # pragma: no cover
+ else: # pragma: no cover
raise TypeError('Cannot subtract %s from %s'
% (type(other), type(self)))
@@ -228,6 +228,7 @@ def freqstr(self):
return fstr
+
class BusinessDay(CacheableOffset, DateOffset):
"""
DateOffset subclass representing possibly n business days
@@ -343,6 +344,7 @@ def apply(self, other):
else:
raise Exception('Only know how to combine business day with '
'datetime or timedelta!')
+
@classmethod
def onOffset(cls, dt):
return dt.weekday() < 5
@@ -379,7 +381,7 @@ class MonthBegin(DateOffset, CacheableOffset):
def apply(self, other):
n = self.n
- if other.day > 1 and n <= 0: #then roll forward if n<=0
+ if other.day > 1 and n <= 0: # then roll forward if n<=0
n += 1
other = other + relativedelta(months=n, day=1)
@@ -436,7 +438,7 @@ def apply(self, other):
# as if rolled forward already
n += 1
elif other.day < first and n > 0:
- other = other + timedelta(days=first-other.day)
+ other = other + timedelta(days=first - other.day)
n -= 1
other = other + relativedelta(months=n)
@@ -525,6 +527,7 @@ def rule_code(self):
6: 'SUN'
}
+
class WeekOfMonth(DateOffset, CacheableOffset):
"""
Describes monthly dates like "the Tuesday of the 2nd week of each month"
@@ -631,7 +634,7 @@ def apply(self, other):
elif n <= 0 and other.day > lastBDay and monthsToGo == 0:
n = n + 1
- other = other + relativedelta(months=monthsToGo + 3*n, day=31)
+ other = other + relativedelta(months=monthsToGo + 3 * n, day=31)
if other.weekday() > 4:
other = other - BDay()
@@ -686,7 +689,7 @@ def apply(self, other):
monthsSince = (other.month - self.startingMonth) % 3
- if n <= 0 and monthsSince != 0: # make sure to roll forward so negate
+ if n <= 0 and monthsSince != 0: # make sure to roll forward so negate
monthsSince = monthsSince - 3
# roll forward if on same month later than first bday
@@ -697,7 +700,7 @@ def apply(self, other):
n = n - 1
# get the first bday for result
- other = other + relativedelta(months=3*n - monthsSince)
+ other = other + relativedelta(months=3 * n - monthsSince)
wkday, _ = lib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
result = datetime(other.year, other.month, first,
@@ -741,7 +744,7 @@ def apply(self, other):
if n > 0 and not (other.day >= days_in_month and monthsToGo == 0):
n = n - 1
- other = other + relativedelta(months=monthsToGo + 3*n, day=31)
+ other = other + relativedelta(months=monthsToGo + 3 * n, day=31)
return other
@@ -783,7 +786,7 @@ def apply(self, other):
# after start, so come back an extra period as if rolled forward
n = n + 1
- other = other + relativedelta(months=3*n - monthsSince, day=1)
+ other = other + relativedelta(months=3 * n - monthsSince, day=1)
return other
@property
@@ -860,18 +863,17 @@ def apply(self, other):
years = n
-
- if n > 0: # roll back first for positive n
+ if n > 0: # roll back first for positive n
if (other.month < self.month or
(other.month == self.month and other.day < first)):
years -= 1
- elif n <= 0: # roll forward
+ elif n <= 0: # roll forward
if (other.month > self.month or
(other.month == self.month and other.day > first)):
years += 1
# set first bday for result
- other = other + relativedelta(years = years)
+ other = other + relativedelta(years=years)
wkday, days_in_month = lib.monthrange(other.year, self.month)
first = _get_firstbday(wkday)
return datetime(other.year, self.month, first)
@@ -909,6 +911,7 @@ def _increment(date):
return datetime(year, self.month, days_in_month,
date.hour, date.minute, date.second,
date.microsecond)
+
def _decrement(date):
year = date.year if date.month > self.month else date.year - 1
_, days_in_month = lib.monthrange(year, self.month)
@@ -967,7 +970,7 @@ def apply(self, other):
other.microsecond)
if n <= 0:
n = n + 1
- other = other + relativedelta(years = n, day=1)
+ other = other + relativedelta(years=n, day=1)
return other
@classmethod
@@ -1039,10 +1042,12 @@ def apply(self, other):
raise TypeError('Unhandled type: %s' % type(other))
_rule_base = 'undefined'
+
@property
def rule_code(self):
return self._rule_base
+
def _delta_to_tick(delta):
if delta.microseconds == 0:
if delta.seconds == 0:
@@ -1064,6 +1069,7 @@ def _delta_to_tick(delta):
else: # pragma: no cover
return Nano(nanos)
+
def _delta_to_nanoseconds(delta):
if isinstance(delta, Tick):
delta = delta.delta
@@ -1071,6 +1077,7 @@ def _delta_to_nanoseconds(delta):
+ delta.seconds * 1000000
+ delta.microseconds) * 1000
+
class Day(Tick, CacheableOffset):
_inc = timedelta(1)
_rule_base = 'D'
@@ -1079,25 +1086,31 @@ def isAnchored(self):
return False
+
class Hour(Tick):
_inc = timedelta(0, 3600)
_rule_base = 'H'
+
class Minute(Tick):
_inc = timedelta(0, 60)
_rule_base = 'T'
+
class Second(Tick):
_inc = timedelta(0, 1)
_rule_base = 'S'
+
class Milli(Tick):
_rule_base = 'L'
+
class Micro(Tick):
_inc = timedelta(microseconds=1)
_rule_base = 'U'
+
class Nano(Tick):
_inc = 1
_rule_base = 'N'
@@ -1114,9 +1127,9 @@ def _get_firstbday(wkday):
If it's a saturday or sunday, increment first business day to reflect this
"""
first = 1
- if wkday == 5: # on Saturday
+ if wkday == 5: # on Saturday
first = 3
- elif wkday == 6: # on Sunday
+ elif wkday == 6: # on Sunday
first = 2
return first
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index 88991b57d67d3..d7557e38c1680 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -28,6 +28,7 @@ def f(self):
f.__name__ = name
return property(f)
+
def _field_accessor(name, alias):
def f(self):
base, mult = _gfc(self.freq)
@@ -386,6 +387,7 @@ def strftime(self, fmt):
base, mult = _gfc(self.freq)
return plib.period_format(self.ordinal, base, fmt)
+
def _get_date_and_freq(value, freq):
value = value.upper()
dt, _, reso = parse_time_string(value, freq)
@@ -418,6 +420,7 @@ def _get_ordinals(data, freq):
else:
return lib.map_infer(data, f)
+
def dt64arr_to_periodarr(data, freq):
if data.dtype != np.dtype('M8[ns]'):
raise ValueError('Wrong dtype: %s' % data.dtype)
@@ -828,7 +831,7 @@ def get_value(self, series, key):
# if our data is higher resolution than requested key, slice
if grp < freqn:
- iv = Period(asdt, freq=(grp,1))
+ iv = Period(asdt, freq=(grp, 1))
ord1 = iv.asfreq(self.freq, how='S').ordinal
ord2 = iv.asfreq(self.freq, how='E').ordinal
@@ -836,7 +839,7 @@ def get_value(self, series, key):
raise KeyError(key)
pos = np.searchsorted(self.values, [ord1, ord2])
- key = slice(pos[0], pos[1]+1)
+ key = slice(pos[0], pos[1] + 1)
return series[key]
else:
key = Period(asdt, freq=self.freq)
@@ -993,7 +996,7 @@ def format(self, name=False):
return header + ['%s' % Period(x, freq=self.freq) for x in self]
def __array_finalize__(self, obj):
- if self.ndim == 0: # pragma: no cover
+ if self.ndim == 0: # pragma: no cover
return self.item()
self.freq = getattr(obj, 'freq', None)
@@ -1088,10 +1091,11 @@ def _get_ordinal_range(start, end, periods, freq):
data = np.arange(start.ordinal, start.ordinal + periods,
dtype=np.int64)
else:
- data = np.arange(start.ordinal, end.ordinal+1, dtype=np.int64)
+ data = np.arange(start.ordinal, end.ordinal + 1, dtype=np.int64)
return data, freq
+
def _range_from_fields(year=None, month=None, quarter=None, day=None,
hour=None, minute=None, second=None, freq=None):
if hour is None:
@@ -1131,6 +1135,7 @@ def _range_from_fields(year=None, month=None, quarter=None, day=None,
return np.array(ordinals, dtype=np.int64), freq
+
def _make_field_arrays(*fields):
length = None
for x in fields:
@@ -1157,6 +1162,7 @@ def _ordinal_from_fields(year, month, quarter, day, hour, minute,
return plib.period_ordinal(year, month, day, hour, minute, second, base)
+
def _quarter_to_myear(year, quarter, freq):
if quarter is not None:
if quarter <= 0 or quarter > 4:
@@ -1179,9 +1185,11 @@ def _validate_end_alias(how):
raise ValueError('How must be one of S or E')
return how
+
def pnow(freq=None):
return Period(datetime.now(), freq=freq)
+
def period_range(start=None, end=None, periods=None, freq='D', name=None):
"""
Return a fixed frequency datetime index, with day (calendar) as the default
@@ -1206,6 +1214,7 @@ def period_range(start=None, end=None, periods=None, freq='D', name=None):
return PeriodIndex(start=start, end=end, periods=periods,
freq=freq, name=name)
+
def _period_rule_to_timestamp_rule(freq, how='end'):
how = how.lower()
if how in ('end', 'e'):
diff --git a/pandas/tseries/plotting.py b/pandas/tseries/plotting.py
index 6f1772dd364a6..70b36ff7ef8c7 100644
--- a/pandas/tseries/plotting.py
+++ b/pandas/tseries/plotting.py
@@ -26,6 +26,7 @@
#----------------------------------------------------------------------
# Plotting functions and monkey patches
+
def tsplot(series, plotf, **kwargs):
"""
Plots a Series on the given Matplotlib axes or the current axes
@@ -49,7 +50,7 @@ def tsplot(series, plotf, **kwargs):
freq = _get_freq(ax, series)
# resample against axes freq if necessary
- if freq is None: # pragma: no cover
+ if freq is None: # pragma: no cover
raise ValueError('Cannot use dynamic axis without frequency info')
else:
# Convert DatetimeIndex to PeriodIndex
@@ -74,7 +75,7 @@ def tsplot(series, plotf, **kwargs):
if style is not None:
args.append(style)
- lines = plotf(ax, *args, **kwargs)
+ lines = plotf(ax, *args, **kwargs)
label = kwargs.get('label', None)
# set date formatter, locators and rescale limits
@@ -84,14 +85,15 @@ def tsplot(series, plotf, **kwargs):
return lines
+
def _maybe_resample(series, ax, freq, plotf, kwargs):
ax_freq = _get_ax_freq(ax)
if ax_freq is not None and freq != ax_freq:
- if frequencies.is_superperiod(freq, ax_freq): # upsample input
+ if frequencies.is_superperiod(freq, ax_freq): # upsample input
series = series.copy()
series.index = series.index.asfreq(ax_freq)
freq = ax_freq
- elif _is_sup(freq, ax_freq): # one is weekly
+ elif _is_sup(freq, ax_freq): # one is weekly
how = kwargs.pop('how', 'last')
series = series.resample('D', how=how).dropna()
series = series.resample(ax_freq, how=how).dropna()
@@ -103,6 +105,7 @@ def _maybe_resample(series, ax, freq, plotf, kwargs):
raise ValueError('Incompatible frequency conversion')
return freq, ax_freq, series
+
def _get_ax_freq(ax):
ax_freq = getattr(ax, 'freq', None)
if ax_freq is None:
@@ -112,14 +115,17 @@ def _get_ax_freq(ax):
ax_freq = getattr(ax.right_ax, 'freq', None)
return ax_freq
+
def _is_sub(f1, f2):
return ((f1.startswith('W') and frequencies.is_subperiod('D', f2)) or
(f2.startswith('W') and frequencies.is_subperiod(f1, 'D')))
+
def _is_sup(f1, f2):
return ((f1.startswith('W') and frequencies.is_superperiod('D', f2)) or
(f2.startswith('W') and frequencies.is_superperiod(f1, 'D')))
+
def _upsample_others(ax, freq, plotf, kwargs):
legend = ax.get_legend()
lines, labels = _replot_ax(ax, freq, plotf, kwargs)
@@ -142,6 +148,7 @@ def _upsample_others(ax, freq, plotf, kwargs):
title = None
ax.legend(lines, labels, loc='best', title=title)
+
def _replot_ax(ax, freq, plotf, kwargs):
data = getattr(ax, '_plot_data', None)
ax._plot_data = []
@@ -162,6 +169,7 @@ def _replot_ax(ax, freq, plotf, kwargs):
return lines, labels
+
def _decorate_axes(ax, freq, kwargs):
ax.freq = freq
xaxis = ax.get_xaxis()
@@ -173,6 +181,7 @@ def _decorate_axes(ax, freq, kwargs):
ax.view_interval = None
ax.date_axis_info = None
+
def _maybe_mask(series):
mask = isnull(series)
if mask.any():
@@ -183,6 +192,7 @@ def _maybe_mask(series):
args = [series.index, series]
return args
+
def _get_freq(ax, series):
# get frequency from data
freq = getattr(series.index, 'freq', None)
@@ -205,6 +215,7 @@ def _get_freq(ax, series):
return freq
+
def _get_xlim(lines):
left, right = np.inf, -np.inf
for l in lines:
@@ -213,6 +224,7 @@ def _get_xlim(lines):
right = max(x[-1].ordinal, right)
return left, right
+
def get_datevalue(date, freq):
if isinstance(date, Period):
return date.asfreq(freq).ordinal
@@ -228,6 +240,7 @@ def get_datevalue(date, freq):
# Patch methods for subplot. Only format_dateaxis is currently used.
# Do we need the rest for convenience?
+
def format_dateaxis(subplot, freq):
"""
Pretty-formats the date axis (x-axis).
diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py
index be5098dede15a..1fb1725f183c7 100644
--- a/pandas/tseries/resample.py
+++ b/pandas/tseries/resample.py
@@ -184,7 +184,7 @@ def _resample_timestamps(self, obj):
# Determine if we're downsampling
if axlabels.freq is not None or axlabels.inferred_freq is not None:
if len(grouper.binlabels) < len(axlabels) or self.how is not None:
- grouped = obj.groupby(grouper, axis=self.axis)
+ grouped = obj.groupby(grouper, axis=self.axis)
result = grouped.aggregate(self._agg_method)
else:
# upsampling shortcut
@@ -193,7 +193,7 @@ def _resample_timestamps(self, obj):
limit=self.limit)
else:
# Irregular data, have to use groupby
- grouped = obj.groupby(grouper, axis=self.axis)
+ grouped = obj.groupby(grouper, axis=self.axis)
result = grouped.aggregate(self._agg_method)
if self.fill_method is not None:
@@ -265,7 +265,6 @@ def _take_new_index(obj, indexer, new_index, axis=0):
raise NotImplementedError
-
def _get_range_edges(axis, offset, closed='left', base=0):
if isinstance(offset, basestring):
offset = to_offset(offset)
@@ -278,7 +277,7 @@ def _get_range_edges(axis, offset, closed='left', base=0):
closed=closed, base=base)
first, last = axis[0], axis[-1]
- if not isinstance(offset, Tick):# and first.time() != last.time():
+ if not isinstance(offset, Tick): # and first.time() != last.time():
# hack!
first = tools.normalize_date(first)
last = tools.normalize_date(last)
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 36a9f32bd04c4..9e1c451c42887 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -17,9 +17,10 @@
dateutil.__version__ == '2.0'): # pragma: no cover
raise Exception('dateutil 2.0 incompatible with Python 2.x, you must '
'install version 1.5 or 2.1+!')
-except ImportError: # pragma: no cover
+except ImportError: # pragma: no cover
print 'Please install python-dateutil via easy_install or some method!'
- raise # otherwise a 2nd import won't show the message
+ raise # otherwise a 2nd import won't show the message
+
def _infer_tzinfo(start, end):
def _infer(a, b):
@@ -124,7 +125,6 @@ class DateParseError(ValueError):
pass
-
# patterns for quarters like '4Q2005', '05Q1'
qpat1full = re.compile(r'(\d)Q(\d\d\d\d)')
qpat2full = re.compile(r'(\d\d\d\d)Q(\d)')
@@ -132,6 +132,7 @@ class DateParseError(ValueError):
qpat2 = re.compile(r'(\d\d)Q(\d)')
ypat = re.compile(r'(\d\d\d\d)$')
+
def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
"""
Try hard to parse datetime string, leveraging dateutil plus some extra
@@ -161,7 +162,7 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
arg = arg.upper()
- default = datetime(1,1,1).replace(hour=0, minute=0,
+ default = datetime(1, 1, 1).replace(hour=0, minute=0,
second=0, microsecond=0)
# special handling for possibilities eg, 2Q2005, 2Q05, 2005Q1, 05Q1
@@ -239,7 +240,7 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
"minute", "second", "microsecond"]:
can_be_zero = ['hour', 'minute', 'second', 'microsecond']
value = getattr(parsed, attr)
- if value is not None and value != 0: # or attr in can_be_zero):
+ if value is not None and value != 0: # or attr in can_be_zero):
repl[attr] = value
if not stopped:
reso = attr
@@ -249,6 +250,7 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
ret = default.replace(**repl)
return ret, parsed, reso # datetime, resolution
+
def _attempt_monthly(val):
pats = ['%Y-%m', '%m-%Y', '%b %Y', '%b-%Y']
for pat in pats:
@@ -269,7 +271,7 @@ def _try_parse_monthly(arg):
add_base = True
y = int(arg[:2])
m = int(arg[2:4])
- elif len(arg) >= 6: # 201201
+ elif len(arg) >= 6: # 201201
y = int(arg[:4])
m = int(arg[4:6])
if add_base:
@@ -287,6 +289,7 @@ def format(dt):
OLE_TIME_ZERO = datetime(1899, 12, 30, 0, 0, 0)
+
def ole2datetime(oledt):
"""function for converting excel date to normal date format"""
val = float(oledt)
diff --git a/pandas/tseries/util.py b/pandas/tseries/util.py
index 4b29771233c50..0702bc40389c9 100644
--- a/pandas/tseries/util.py
+++ b/pandas/tseries/util.py
@@ -3,6 +3,7 @@
from pandas.core.frame import DataFrame
import pandas.core.nanops as nanops
+
def pivot_annual(series, freq=None):
"""
Group a series by years, taking leap years into account.
@@ -71,6 +72,7 @@ def pivot_annual(series, freq=None):
return DataFrame(values, index=years, columns=columns)
+
def isleapyear(year):
"""
Returns true if year is a leap year.
diff --git a/pandas/util/clipboard.py b/pandas/util/clipboard.py
index b2180001533bd..4136df072c6b6 100644
--- a/pandas/util/clipboard.py
+++ b/pandas/util/clipboard.py
@@ -7,6 +7,7 @@
import subprocess
import sys
+
def clipboard_get():
""" Get text from the clipboard.
"""
@@ -22,6 +23,7 @@ def clipboard_get():
pass
return tkinter_clipboard_get()
+
def clipboard_set(text):
""" Get text from the clipboard.
"""
@@ -37,6 +39,7 @@ def clipboard_set(text):
pass
xsel_clipboard_set(text)
+
def win32_clipboard_get():
""" Get the current clipboard's text on Windows.
@@ -54,6 +57,7 @@ def win32_clipboard_get():
win32clipboard.CloseClipboard()
return text
+
def osx_clipboard_get():
""" Get the clipboard's text on OS X.
"""
@@ -64,6 +68,7 @@ def osx_clipboard_get():
text = text.replace('\r', '\n')
return text
+
def tkinter_clipboard_get():
""" Get the clipboard's text using Tkinter.
@@ -83,6 +88,7 @@ def tkinter_clipboard_get():
root.destroy()
return text
+
def win32_clipboard_set(text):
# idiosyncratic win32 import issues
import pywintypes as _
@@ -94,9 +100,11 @@ def win32_clipboard_set(text):
finally:
win32clipboard.CloseClipboard()
+
def _fix_line_endings(text):
return '\r\n'.join(text.splitlines())
+
def osx_clipboard_set(text):
""" Get the clipboard's text on OS X.
"""
@@ -104,6 +112,7 @@ def osx_clipboard_set(text):
stdin=subprocess.PIPE)
p.communicate(input=text)
+
def xsel_clipboard_set(text):
from subprocess import Popen, PIPE
p = Popen(['xsel', '-bi'], stdin=PIPE)
diff --git a/pandas/util/compat.py b/pandas/util/compat.py
index 213f065523073..894f94d11a8b8 100644
--- a/pandas/util/compat.py
+++ b/pandas/util/compat.py
@@ -9,6 +9,6 @@ def product(*args, **kwds):
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
- result = [x+[y] for x in result for y in pool]
+ result = [x + [y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
diff --git a/pandas/util/counter.py b/pandas/util/counter.py
index f23f6e6fbbad1..29e8906fdee38 100644
--- a/pandas/util/counter.py
+++ b/pandas/util/counter.py
@@ -8,9 +8,10 @@
try:
from collections import Mapping
except:
- # ABCs were only introduced in Python 2.6, so this is a hack for Python 2.5:
+ # ABCs were only introduced in Python 2.6, so this is a hack for Python 2.5
Mapping = dict
+
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
@@ -50,8 +51,8 @@ class Counter(dict):
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
- >>> c['b'] -= 2 # reduce the count of 'b' by two
- >>> c.most_common() # 'b' is still in, but its count is zero
+ >>> c['b'] -= 2 # reduce the count of 'b' by two
+ >>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
@@ -67,10 +68,10 @@ def __init__(self, iterable=None, **kwds):
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
- >>> c = Counter() # a new, empty counter
- >>> c = Counter('gallahad') # a new counter from an iterable
- >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
- >>> c = Counter(a=4, b=2) # a new counter from keyword args
+ >>> c = Counter() # a new, empty counter
+ >>> c = Counter('gallahad') # a new counter from an iterable
+ >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
+ >>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
super(Counter, self).__init__()
@@ -152,7 +153,8 @@ def update(self, iterable=None, **kwds):
for elem, count in iterable.iteritems():
self[elem] = self_get(elem, 0) + count
else:
- super(Counter, self).update(iterable) # fast path when counter is empty
+ # fast path when counter is empty
+ super(Counter, self).update(iterable)
else:
self_get = self.get
for elem in iterable:
@@ -195,7 +197,9 @@ def __reduce__(self):
return self.__class__, (dict(self),)
def __delitem__(self, elem):
- 'Like dict.__delitem__() but does not raise KeyError for missing values.'
+ """
+ Like dict.__delitem__() but does not raise KeyError for missing values.
+ """
if elem in self:
super(Counter, self).__delitem__(elem)
diff --git a/pandas/util/decorators.py b/pandas/util/decorators.py
index 5cd87a1e9c683..bef3ffc569df1 100644
--- a/pandas/util/decorators.py
+++ b/pandas/util/decorators.py
@@ -3,8 +3,10 @@
import sys
import warnings
+
def deprecate(name, alternative):
alt_name = alternative.func_name
+
def wrapper(*args, **kwargs):
warnings.warn("%s is deprecated. Use %s instead" % (name, alt_name),
FutureWarning)
@@ -14,6 +16,7 @@ def wrapper(*args, **kwargs):
# Substitution and Appender are derived from matplotlib.docstring (1.1.0)
# module http://matplotlib.sourceforge.net/users/license.html
+
class Substitution(object):
"""
A decorator to take a function's docstring and perform string
@@ -66,6 +69,7 @@ def from_params(cls, params):
result.params = params
return result
+
class Appender(object):
"""
A function decorator that will append an addendum to the docstring
@@ -99,12 +103,14 @@ def __call__(self, func):
func.__doc__ = ''.join(docitems)
return func
+
def indent(text, indents=1):
if not text or type(text) != str:
return ''
jointext = ''.join(['\n'] + [' '] * indents)
return jointext.join(text.split('\n'))
+
def suppress_stdout(f):
def wrapped(*args, **kwargs):
try:
@@ -120,6 +126,7 @@ class KnownFailureTest(Exception):
'''Raise this exception to mark a test as a known failing test.'''
pass
+
def knownfailureif(fail_condition, msg=None):
"""
Make function raise KnownFailureTest exception if given condition is true.
@@ -163,6 +170,7 @@ def knownfail_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
+
def knownfailer(*args, **kwargs):
if fail_val():
raise KnownFailureTest, msg
diff --git a/pandas/util/py3compat.py b/pandas/util/py3compat.py
index 9a602155eafd8..0b00e5211daf9 100644
--- a/pandas/util/py3compat.py
+++ b/pandas/util/py3compat.py
@@ -16,6 +16,7 @@ def bytes_to_str(b, encoding='utf-8'):
# Python 2
import re
_name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
+
def isidentifier(s, dotted=False):
return bool(_name_re.match(s))
@@ -34,4 +35,3 @@ def bytes_to_str(b, encoding='ascii'):
from io import BytesIO
except:
from cStringIO import StringIO as BytesIO
-
diff --git a/pandas/util/terminal.py b/pandas/util/terminal.py
index 4278f35ba5019..312f54b521e90 100644
--- a/pandas/util/terminal.py
+++ b/pandas/util/terminal.py
@@ -14,28 +14,29 @@
import os
-__all__=['get_terminal_size']
+__all__ = ['get_terminal_size']
def get_terminal_size():
- import platform
- current_os = platform.system()
- tuple_xy=None
- if current_os == 'Windows':
- tuple_xy = _get_terminal_size_windows()
- if tuple_xy is None:
- tuple_xy = _get_terminal_size_tput()
- # needed for window's python in cygwin's xterm!
- if current_os == 'Linux' or \
- current_os == 'Darwin' or \
- current_os.startswith('CYGWIN'):
- tuple_xy = _get_terminal_size_linux()
- if tuple_xy is None:
- tuple_xy = (80, 25) # default value
- return tuple_xy
+ import platform
+ current_os = platform.system()
+ tuple_xy = None
+ if current_os == 'Windows':
+ tuple_xy = _get_terminal_size_windows()
+ if tuple_xy is None:
+ tuple_xy = _get_terminal_size_tput()
+ # needed for window's python in cygwin's xterm!
+ if current_os == 'Linux' or \
+ current_os == 'Darwin' or \
+ current_os.startswith('CYGWIN'):
+ tuple_xy = _get_terminal_size_linux()
+ if tuple_xy is None:
+ tuple_xy = (80, 25) # default value
+ return tuple_xy
+
def _get_terminal_size_windows():
- res=None
+ res = None
try:
from ctypes import windll, create_string_buffer
@@ -58,32 +59,36 @@ def _get_terminal_size_windows():
else:
return None
+
def _get_terminal_size_tput():
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width
# -height-of-a-terminal-window
try:
- import subprocess
- proc = subprocess.Popen(["tput", "cols"],
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE)
- output=proc.communicate(input=None)
- cols=int(output[0])
- proc=subprocess.Popen(["tput", "lines"],
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE)
- output=proc.communicate(input=None)
- rows=int(output[0])
- return (cols,rows)
+ import subprocess
+ proc = subprocess.Popen(["tput", "cols"],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ output = proc.communicate(input=None)
+ cols = int(output[0])
+ proc = subprocess.Popen(["tput", "lines"],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ output = proc.communicate(input=None)
+ rows = int(output[0])
+ return (cols, rows)
except:
- return None
+ return None
def _get_terminal_size_linux():
def ioctl_GWINSZ(fd):
try:
- import fcntl, termios, struct, os
- cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,'1234'))
+ import fcntl
+ import termios
+ import struct
+ import os
+ cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except:
return None
return cr
@@ -97,12 +102,12 @@ def ioctl_GWINSZ(fd):
pass
if not cr or cr == (0, 0):
try:
- from os import environ as env
- cr = (env['LINES'], env['COLUMNS'])
+ from os import environ as env
+ cr = (env['LINES'], env['COLUMNS'])
except:
return None
return int(cr[1]), int(cr[0])
if __name__ == "__main__":
sizex, sizey = get_terminal_size()
- print 'width =', sizex, 'height =', sizey
+ print 'width =', sizex, 'height =', sizey
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 904426731738a..866f39490aecd 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -33,17 +33,20 @@
N = 30
K = 4
+
def rands(n):
choices = string.ascii_letters + string.digits
return ''.join([random.choice(choices) for _ in xrange(n)])
+
def randu(n):
- choices = u"".join(map(unichr,range(1488,1488+26))) + string.digits
+ choices = u"".join(map(unichr, range(1488, 1488 + 26))) + string.digits
return ''.join([random.choice(choices) for _ in xrange(n)])
-#-------------------------------------------------------------------------------
+#------------------------------------------------------------------------------
# Console debugging tools
+
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
@@ -55,10 +58,12 @@ def debug(f, *args, **kwargs):
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
+
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
+
def set_trace():
from IPython.core.debugger import Pdb
try:
@@ -67,17 +72,20 @@ def set_trace():
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
-#-------------------------------------------------------------------------------
+#------------------------------------------------------------------------------
# Comparators
+
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
+
def isiterable(obj):
return hasattr(obj, '__iter__')
+
def assert_almost_equal(a, b):
if isinstance(a, dict) or isinstance(b, dict):
return assert_dict_equal(a, b)
@@ -109,13 +117,15 @@ def assert_almost_equal(a, b):
a, b, decimal=5, err_msg=err_msg(a, b), verbose=False)
else:
np.testing.assert_almost_equal(
- 1, a/b, decimal=5, err_msg=err_msg(a, b), verbose=False)
+ 1, a / b, decimal=5, err_msg=err_msg(a, b), verbose=False)
else:
assert(a == b)
+
def is_sorted(seq):
return assert_almost_equal(seq, np.sort(np.array(seq)))
+
def assert_dict_equal(a, b, compare_keys=True):
a_keys = frozenset(a.keys())
b_keys = frozenset(b.keys())
@@ -126,6 +136,7 @@ def assert_dict_equal(a, b, compare_keys=True):
for k in a_keys:
assert_almost_equal(a[k], b[k])
+
def assert_series_equal(left, right, check_dtype=True,
check_index_type=False,
check_index_freq=False,
@@ -144,6 +155,7 @@ def assert_series_equal(left, right, check_dtype=True,
assert(getattr(left, 'freqstr', None) ==
getattr(right, 'freqstr', None))
+
def assert_frame_equal(left, right, check_index_type=False,
check_column_type=False,
check_frame_type=False):
@@ -167,6 +179,7 @@ def assert_frame_equal(left, right, check_index_type=False,
assert(left.columns.dtype == right.columns.dtype)
assert(left.columns.inferred_type == right.columns.inferred_type)
+
def assert_panel_equal(left, right, check_panel_type=False):
if check_panel_type:
assert(type(left) == type(right))
@@ -182,102 +195,125 @@ def assert_panel_equal(left, right, check_panel_type=False):
for col in right:
assert(col in left)
+
def assert_contains_all(iterable, dic):
for k in iterable:
assert(k in dic)
+
def getCols(k):
return string.ascii_uppercase[:k]
+
def makeStringIndex(k):
return Index([rands(10) for _ in xrange(k)])
+
def makeUnicodeIndex(k):
return Index([randu(10) for _ in xrange(k)])
+
def makeIntIndex(k):
return Index(range(k))
+
def makeFloatIndex(k):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)))
+
def makeFloatSeries():
index = makeStringIndex(N)
return Series(randn(N), index=index)
+
def makeStringSeries():
index = makeStringIndex(N)
return Series(randn(N), index=index)
+
def makeObjectSeries():
dateIndex = makeDateIndex(N)
dateIndex = Index(dateIndex, dtype=object)
index = makeStringIndex(N)
return Series(dateIndex, index=index)
+
def getSeriesData():
index = makeStringIndex(N)
return dict((c, Series(randn(N), index=index)) for c in getCols(K))
+
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
+
def getArangeMat():
return np.arange(N * K).reshape((N, K))
+
def getMixedTypeDict():
index = Index(['a', 'b', 'c', 'd', 'e'])
data = {
- 'A' : [0., 1., 2., 3., 4.],
- 'B' : [0., 1., 0., 1., 0.],
- 'C' : ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
- 'D' : bdate_range('1/1/2009', periods=5)
+ 'A': [0., 1., 2., 3., 4.],
+ 'B': [0., 1., 0., 1., 0.],
+ 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
+ 'D': bdate_range('1/1/2009', periods=5)
}
return index, data
+
def makeDateIndex(k):
- dt = datetime(2000,1,1)
+ dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k)
return DatetimeIndex(dr)
+
def makePeriodIndex(k):
- dt = datetime(2000,1,1)
+ dt = datetime(2000, 1, 1)
dr = PeriodIndex(start=dt, periods=k, freq='B')
return dr
+
def makeTimeSeries(nper=None):
if nper is None:
nper = N
return Series(randn(nper), index=makeDateIndex(nper))
+
def makePeriodSeries(nper=None):
if nper is None:
nper = N
return Series(randn(nper), index=makePeriodIndex(nper))
+
def getTimeSeriesData():
return dict((c, makeTimeSeries()) for c in getCols(K))
+
def makeTimeDataFrame():
data = getTimeSeriesData()
return DataFrame(data)
+
def getPeriodData():
return dict((c, makePeriodSeries()) for c in getCols(K))
+
def makePeriodFrame():
data = getPeriodData()
return DataFrame(data)
+
def makePanel():
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = dict((c, makeTimeDataFrame()) for c in cols)
return Panel.fromDict(data)
+
def add_nans(panel):
I, J, N = panel.shape
for i, item in enumerate(panel.items):
@@ -285,6 +321,7 @@ def add_nans(panel):
for j, col in enumerate(dm.columns):
dm[col][:i + j] = np.NaN
+
class TestSubDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
@@ -327,7 +364,7 @@ def package_check(pkg_name, version=None, app='pandas', checker=LooseVersion,
else:
msg = 'module requires %s' % pkg_name
if version:
- msg += ' with version >= %s' % (version,)
+ msg += ' with version >= %s' % (version,)
try:
mod = __import__(pkg_name)
except ImportError:
@@ -341,6 +378,7 @@ def package_check(pkg_name, version=None, app='pandas', checker=LooseVersion,
if checker(have_version) < checker(version):
raise exc_failed_check(msg)
+
def skip_if_no_package(*args, **kwargs):
"""Raise SkipTest if package_check fails
@@ -357,6 +395,8 @@ def skip_if_no_package(*args, **kwargs):
#
# Additional tags decorators for nose
#
+
+
def network(t):
"""
Label a test as requiring network connection.
@@ -411,14 +451,15 @@ def __init__(self, obj, *args, **kwds):
attrs = kwds.get("attrs", {})
for k, v in zip(args[::2], args[1::2]):
# dict comprehensions break 2.6
- attrs[k]=v
+ attrs[k] = v
self.attrs = attrs
self.obj = obj
- def __getattribute__(self,name):
+ def __getattribute__(self, name):
attrs = object.__getattribute__(self, "attrs")
obj = object.__getattribute__(self, "obj")
- return attrs.get(name, type(obj).__getattribute__(obj,name))
+ return attrs.get(name, type(obj).__getattribute__(obj, name))
+
@contextmanager
def stdin_encoding(encoding=None):
| Except for test_\* files i ran every file through pep8, and resolved most of the violations.
| https://api.github.com/repos/pandas-dev/pandas/pulls/2086 | 2012-10-19T21:57:12Z | 2012-10-31T20:27:52Z | 2012-10-31T20:27:52Z | 2012-10-31T20:27:52Z |
CLN: cleanup tox.ini, remove stale pandas/setup.py, fix tox warning | diff --git a/pandas/setup.py b/pandas/setup.py
deleted file mode 100644
index f9945f0fdaab1..0000000000000
--- a/pandas/setup.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-
-import numpy
-
-def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('pandas', parent_package, top_path)
- config.add_subpackage('core')
- config.add_subpackage('io')
- config.add_subpackage('rpy')
- config.add_subpackage('sandbox')
- config.add_subpackage('stats')
- config.add_subpackage('util')
- config.add_data_dir('tests')
-
- config.add_extension('_tseries',
- sources=['src/tseries.c'],
- include_dirs=[numpy.get_include()])
- config.add_extension('_sparse',
- sources=['src/sparse.c'],
- include_dirs=[numpy.get_include()])
- return config
-
-if __name__ == '__main__':
- print('This is the wrong setup.py file to run')
-
diff --git a/tox.ini b/tox.ini
index 9baf33cf8d2f9..f4e03e1677344 100644
--- a/tox.ini
+++ b/tox.ini
@@ -7,18 +7,36 @@
envlist = py25, py26, py27, py31, py32
[testenv]
-commands =
- {envpython} setup.py clean build_ext install
- {envbindir}/nosetests tests
- /bin/rm -rf {toxinidir}/build {toxinidir}/tests
deps =
cython
numpy >= 1.6.1
nose
pytz
+# cd to anything but the default {toxinidir} which
+# contains the pandas subdirectory and confuses
+# nose away from the fresh install in site-packages
+changedir = {envdir}
+
+commands =
+ # TODO: --exe because of GH #761
+ {envbindir}/nosetests --exe pandas.tests
+ # cleanup the temp. build dir created by the tox build
+ /bin/rm -rf {toxinidir}/build
+
+ # quietly rollback the install.
+ # Note this line will only be reached if the tests
+ # previous lines succeed (in particular, the tests),
+ # but an uninstall is really only required when
+ # files are removed from source tree, in which case,
+ # stale versions of files will will remain in the venv,
+ # until the next time uninstall is run.
+ #
+ # tox should provide a preinstall-commands hook.
+ pip uninstall pandas -qy
+
+
[testenv:py25]
-changedir = .tox/py25/lib/python2.5/site-packages/pandas
deps =
cython
numpy >= 1.6.1
@@ -27,13 +45,9 @@ deps =
simplejson
[testenv:py26]
-changedir = .tox/py26/lib/python2.6/site-packages/pandas
[testenv:py27]
-changedir = .tox/py27/lib/python2.7/site-packages/pandas
[testenv:py31]
-changedir = .tox/py31/lib/python3.1/site-packages/pandas
[testenv:py32]
-changedir = .tox/py32/lib/python3.2/site-packages/pandas
| I always find it hard to get Packaging issues right, i'd appreciate if someone
would back me up on pandas/setup.py being superfluous.
I made sure that setup.py install/develop still works, and that introducing
a failed test is caught by tox as you would expect,
Also see #2029 for much faster tox runs.
| https://api.github.com/repos/pandas-dev/pandas/pulls/2063 | 2012-10-12T17:08:38Z | 2012-10-12T23:39:58Z | 2012-10-12T23:39:58Z | 2012-10-12T23:40:06Z |
DOC: various small fixes | diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index c6e919eb6096c..3c3c67092c8f1 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -26,7 +26,7 @@ objects. To get started, import numpy and load pandas into your namespace:
randn = np.random.randn
from pandas import *
-Here is a basic tenet to keep in mind: **data alignment is intrinsic**. Link
+Here is a basic tenet to keep in mind: **data alignment is intrinsic**. The link
between labels and data will not be broken unless done so explicitly by you.
We'll give a brief intro to the data structures, then consider all of the broad
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 1494871b6262d..508f6076f075d 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -164,7 +164,7 @@ You can also use a list of columns to create a hierarchical index:
The ``dialect`` keyword gives greater flexibility in specifying the file format.
By default it uses the Excel dialect but you can specify either the dialect name
-or a :class:``python:csv.Dialect`` instance.
+or a :class:`python:csv.Dialect` instance.
.. ipython:: python
:suppress:
@@ -286,6 +286,13 @@ data columns:
index_col=0) #index is the nominal column
df
+**Note**: When passing a dict as the `parse_dates` argument, the order of
+the columns prepended is not guaranteed, because `dict` objects do not impose
+an ordering on their keys. On Python 2.7+ you may use `collections.OrderedDict`
+instead of a regular `dict` if this matters to you. Because of this, when using a
+dict for 'parse_dates' in conjunction with the `index_col` argument, it's best to
+specify `index_col` as a column label rather then as an index on the resulting frame.
+
Date Parsing Functions
~~~~~~~~~~~~~~~~~~~~~~
Finally, the parser allows you can specify a custom ``date_parser`` function to
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index 3e7fa29806091..f4cbbe7a074a7 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -327,6 +327,8 @@ for Fourier series. By coloring these curves differently for each class
it is possible to visualize data clustering. Curves belonging to samples
of the same class will usually be closer together and form larger structures.
+**Note**: The "Iris" dataset is available `here <https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv>`__.
+
.. ipython:: python
from pandas import read_csv
@@ -440,6 +442,8 @@ forces acting on our sample are at an equilibrium) is where a dot representing
our sample will be drawn. Depending on which class that sample belongs it will
be colored differently.
+**Note**: The "Iris" dataset is available `here <https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv>`__.
+
.. ipython:: python
from pandas import read_csv
| https://api.github.com/repos/pandas-dev/pandas/pulls/2056 | 2012-10-11T16:45:43Z | 2012-10-31T20:29:34Z | 2012-10-31T20:29:34Z | 2014-07-08T10:21:47Z | |
To csv infs | diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst
index a502d5c78a9ae..f514139a9170f 100644
--- a/doc/source/missing_data.rst
+++ b/doc/source/missing_data.rst
@@ -57,11 +57,11 @@ to handling missing data. While ``NaN`` is the default missing value marker for
reasons of computational speed and convenience, we need to be able to easily
detect this value with data of different types: floating point, integer,
boolean, and general object. In many cases, however, the Python ``None`` will
-arise and we wish to also consider that "missing" or "null". Lastly, for legacy
-reasons ``inf`` and ``-inf`` are also considered to be "null" in
-computations. Since in NumPy divide-by-zero generates ``inf`` or ``-inf`` and
-not ``NaN``, I think you will find this is a worthwhile trade-off (Zen of
-Python: "practicality beats purity").
+arise and we wish to also consider that "missing" or "null".
+
+Until recently, for legacy reasons ``inf`` and ``-inf`` were also
+considered to be "null" in computations. This is no longer the case by
+default; use the :func: `~pandas.core.common.use_inf_as_null` function to recover it.
.. _missing.isnull:
@@ -76,8 +76,9 @@ pandas provides the :func:`~pandas.core.common.isnull` and
isnull(df2['one'])
df2['four'].notnull()
-**Summary:** ``NaN``, ``inf``, ``-inf``, and ``None`` (in object arrays) are
-all considered missing by the ``isnull`` and ``notnull`` functions.
+**Summary:** ``NaN`` and ``None`` (in object arrays) are considered
+missing by the ``isnull`` and ``notnull`` functions. ``inf`` and
+``-inf`` are no longer considered missing by default.
Calculations with missing data
------------------------------
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 668017c29c6ab..bfd8c6348d59a 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -65,6 +65,58 @@ def isnull(obj):
return _isnull_ndarraylike(obj)
else:
return obj is None
+isnull_new = isnull
+
+def isnull_old(obj):
+ '''
+ Replacement for numpy.isnan / -numpy.isfinite which is suitable
+ for use on object arrays. Treat None, NaN, INF, -INF as null.
+
+ Parameters
+ ----------
+ arr: ndarray or object value
+
+ Returns
+ -------
+ boolean ndarray or boolean
+ '''
+ if lib.isscalar(obj):
+ return lib.checknull_old(obj)
+
+ from pandas.core.generic import PandasObject
+ if isinstance(obj, np.ndarray):
+ return _isnull_ndarraylike_old(obj)
+ elif isinstance(obj, PandasObject):
+ # TODO: optimize for DataFrame, etc.
+ return obj.apply(isnull_old)
+ elif isinstance(obj, list) or hasattr(obj, '__array__'):
+ return _isnull_ndarraylike_old(obj)
+ else:
+ return obj is None
+
+def use_inf_as_null(flag):
+ '''
+ Choose which replacement for numpy.isnan / -numpy.isfinite is used.
+
+ Parameters
+ ----------
+ flag: bool
+ True means treat None, NaN, INF, -INF as null (old way),
+ False means None and NaN are null, but INF, -INF are not null
+ (new way).
+
+ Notes
+ -----
+ This approach to setting global module values is discussed and
+ approved here:
+
+ * http://stackoverflow.com/questions/4859217/programmatically-creating-variables-in-python/4859312#4859312
+ '''
+ if flag == True:
+ globals()['isnull'] = isnull_old
+ else:
+ globals()['isnull'] = isnull_new
+
def _isnull_ndarraylike(obj):
from pandas import Series
@@ -90,6 +142,30 @@ def _isnull_ndarraylike(obj):
result = -np.isfinite(obj)
return result
+def _isnull_ndarraylike_old(obj):
+ from pandas import Series
+ values = np.asarray(obj)
+
+ if values.dtype.kind in ('O', 'S', 'U'):
+ # Working around NumPy ticket 1542
+ shape = values.shape
+
+ if values.dtype.kind in ('S', 'U'):
+ result = np.zeros(values.shape, dtype=bool)
+ else:
+ result = np.empty(shape, dtype=bool)
+ vec = lib.isnullobj_old(values.ravel())
+ result[:] = vec.reshape(shape)
+
+ if isinstance(obj, Series):
+ result = Series(result, index=obj.index, copy=False)
+ elif values.dtype == np.dtype('M8[ns]'):
+ # this is the NaT pattern
+ result = values.view('i8') == lib.iNaT
+ else:
+ result = -np.isfinite(obj)
+ return result
+
def notnull(obj):
'''
Replacement for numpy.isfinite / -numpy.isnan which is suitable
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 91005ead01a24..40e959e9d81b6 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1126,6 +1126,8 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
----------
path_or_buf : string or file handle / StringIO
File path
+ sep : character, default ","
+ Field delimiter for the output file.
na_rep : string, default ''
Missing data representation
float_format : string, default None
@@ -1143,12 +1145,13 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
sequence should be given if the DataFrame uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R
+ nanRep : deprecated, use na_rep
mode : Python write mode, default 'w'
- sep : character, default ","
- Field delimiter for the output file.
encoding : string, optional
a string representing the encoding to use if the contents are
non-ascii, for python versions prior to 3
+ quoting : optional constant from csv module
+ defaults to csv.QUOTE_MINIMAL
"""
if nanRep is not None: # pragma: no cover
import warnings
diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx
index 2f699c2871cf4..2ccb09a7a123e 100644
--- a/pandas/src/inference.pyx
+++ b/pandas/src/inference.pyx
@@ -359,6 +359,8 @@ def maybe_convert_numeric(ndarray[object] values, set na_values,
if not seen_float:
if '.' in val:
seen_float = 1
+ elif 'inf' in val: # special case to handle +/-inf
+ seen_float = 1
else:
ints[i] = <int64_t> fval
diff --git a/pandas/src/sparse.pyx b/pandas/src/sparse.pyx
index 19ff2df23599e..579d473cae1b3 100644
--- a/pandas/src/sparse.pyx
+++ b/pandas/src/sparse.pyx
@@ -987,10 +987,12 @@ cdef inline float64_t __rsub(float64_t a, float64_t b):
cdef inline float64_t __div(float64_t a, float64_t b):
if b == 0:
- if a >= 0:
+ if a > 0:
return INF
- else:
+ elif a < 0:
return -INF
+ else:
+ return NaN
else:
return a / b
@@ -999,10 +1001,12 @@ cdef inline float64_t __rdiv(float64_t a, float64_t b):
cdef inline float64_t __floordiv(float64_t a, float64_t b):
if b == 0:
- if a >= 0:
+ if a > 0:
return INF
- else:
+ elif a < 0:
return -INF
+ else:
+ return NaN
else:
return a // b
diff --git a/pandas/src/tseries.pyx b/pandas/src/tseries.pyx
index 54641a78a08d9..65250c27bfd57 100644
--- a/pandas/src/tseries.pyx
+++ b/pandas/src/tseries.pyx
@@ -178,6 +178,18 @@ cdef double INF = <double> np.inf
cdef double NEGINF = -INF
cpdef checknull(object val):
+ if util.is_float_object(val) or util.is_complex_object(val):
+ return val != val and val != INF and val != NEGINF
+ elif util.is_datetime64_object(val):
+ return get_datetime64_value(val) == NPY_NAT
+ elif isinstance(val, _NaT):
+ return True
+ elif is_array(val):
+ return False
+ else:
+ return util._checknull(val)
+
+cpdef checknull_old(object val):
if util.is_float_object(val) or util.is_complex_object(val):
return val != val or val == INF or val == NEGINF
elif util.is_datetime64_object(val):
@@ -189,6 +201,7 @@ cpdef checknull(object val):
else:
return util._checknull(val)
+
def isscalar(object val):
return np.isscalar(val) or val is None or isinstance(val, _Timestamp)
@@ -206,6 +219,19 @@ def isnullobj(ndarray[object] arr):
result[i] = util._checknull(arr[i])
return result.view(np.bool_)
+@cython.wraparound(False)
+@cython.boundscheck(False)
+def isnullobj_old(ndarray[object] arr):
+ cdef Py_ssize_t i, n
+ cdef object val
+ cdef ndarray[uint8_t] result
+
+ n = len(arr)
+ result = np.zeros(n, dtype=np.uint8)
+ for i from 0 <= i < n:
+ result[i] = util._checknull_old(arr[i])
+ return result.view(np.bool_)
+
@cython.wraparound(False)
@cython.boundscheck(False)
@@ -223,6 +249,22 @@ def isnullobj2d(ndarray[object, ndim=2] arr):
result[i, j] = 1
return result.view(np.bool_)
+@cython.wraparound(False)
+@cython.boundscheck(False)
+def isnullobj2d_old(ndarray[object, ndim=2] arr):
+ cdef Py_ssize_t i, j, n, m
+ cdef object val
+ cdef ndarray[uint8_t, ndim=2] result
+
+ n, m = (<object> arr).shape
+ result = np.zeros((n, m), dtype=np.uint8)
+ for i from 0 <= i < n:
+ for j from 0 <= j < m:
+ val = arr[i, j]
+ if checknull_old(val):
+ result[i, j] = 1
+ return result.view(np.bool_)
+
def list_to_object_array(list obj):
'''
Convert list to object ndarray. Seriously can't believe I had to write this
diff --git a/pandas/src/util.pxd b/pandas/src/util.pxd
index fe6e4391c59e5..5d789e73973cc 100644
--- a/pandas/src/util.pxd
+++ b/pandas/src/util.pxd
@@ -60,6 +60,15 @@ cdef inline is_array(object o):
cdef inline bint _checknull(object val):
+ import numpy as np
+ cdef double INF = <double> np.inf
+ cdef double NEGINF = -INF
+ try:
+ return bool(val is None or (val != val and val != INF and val != NEGINF))
+ except ValueError:
+ return False
+
+cdef inline bint _checknull_old(object val):
try:
return bool(val is None or val != val)
except ValueError:
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index e2b0b918f0142..753c6a721cd94 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -6,7 +6,7 @@
import unittest
from pandas import Series, DataFrame, date_range, DatetimeIndex
-from pandas.core.common import notnull, isnull
+from pandas.core.common import notnull, isnull, use_inf_as_null
import pandas.core.common as com
import pandas.util.testing as tm
@@ -18,9 +18,17 @@ def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
+
+ use_inf_as_null(False)
+ assert notnull(np.inf)
+ assert notnull(-np.inf)
+
+ use_inf_as_null(True)
assert not notnull(np.inf)
assert not notnull(-np.inf)
+
+
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(notnull(float_series), Series))
@@ -30,8 +38,8 @@ def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
- assert isnull(np.inf)
- assert isnull(-np.inf)
+ assert not isnull(np.inf)
+ assert not isnull(-np.inf)
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index cf37de4294f3e..485557db93671 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -3382,6 +3382,36 @@ def test_to_csv_from_csv(self):
os.remove(path)
+ def test_to_csv_from_csv_w_some_infs(self):
+ path = '__tmp__'
+
+ # test roundtrip with inf, -inf, nan, as full columns and mix
+ self.frame['G'] = np.nan
+ self.frame['H'] = self.frame.index.map(lambda x: [np.inf, np.nan][np.random.rand() < .5])
+
+ self.frame.to_csv(path)
+ recons = DataFrame.from_csv(path)
+
+ assert_frame_equal(self.frame, recons)
+ assert_frame_equal(np.isinf(self.frame), np.isinf(recons))
+
+ os.remove(path)
+
+ def test_to_csv_from_csv_w_all_infs(self):
+ path = '__tmp__'
+
+ # test roundtrip with inf, -inf, nan, as full columns and mix
+ self.frame['E'] = np.inf
+ self.frame['F'] = -np.inf
+
+ self.frame.to_csv(path)
+ recons = DataFrame.from_csv(path)
+
+ assert_frame_equal(self.frame, recons)
+ assert_frame_equal(np.isinf(self.frame), np.isinf(recons))
+
+ os.remove(path)
+
def test_to_csv_multiindex(self):
path = '__tmp__'
diff --git a/pandas/tests/test_tseries.py b/pandas/tests/test_tseries.py
index 9061402bb6050..a8578f67f6cec 100644
--- a/pandas/tests/test_tseries.py
+++ b/pandas/tests/test_tseries.py
@@ -290,6 +290,16 @@ def test_convert_objects():
result = lib.maybe_convert_objects(arr)
assert(result.dtype == np.object_)
+def test_convert_infs():
+ arr = np.array(['inf', 'inf', 'inf'], dtype='O')
+ result = lib.maybe_convert_numeric(arr, set(), False)
+ assert(result.dtype == np.float64)
+
+ arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
+ result = lib.maybe_convert_numeric(arr, set(), False)
+ assert(result.dtype == np.float64)
+
+
def test_convert_objects_ints():
# test that we can detect many kinds of integers
dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 904426731738a..a15be23c1c4c6 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -103,8 +103,10 @@ def assert_almost_equal(a, b):
return
if isinstance(a, (bool, float, int)):
+ if np.isinf(a):
+ assert np.isinf(b), err_msg(a,b)
# case for zero
- if abs(a) < 1e-5:
+ elif abs(a) < 1e-5:
np.testing.assert_almost_equal(
a, b, decimal=5, err_msg=err_msg(a, b), verbose=False)
else:
| This branch makes inf/-inf different from nan/None (GH #1919), and also fixes a few bugs with INF and NaN values (GH #2026, #2041).
| https://api.github.com/repos/pandas-dev/pandas/pulls/2050 | 2012-10-09T17:58:45Z | 2012-12-02T03:14:36Z | 2012-12-02T03:14:36Z | 2014-06-20T15:11:02Z |
TST: flesh out EA setitem tests | diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index f1fa74192d4df..cb9a19b438feb 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -8,6 +8,35 @@
class BaseSetitemTests(BaseExtensionTests):
+ @pytest.fixture(
+ params=[
+ lambda x: x.index,
+ lambda x: list(x.index),
+ lambda x: slice(None),
+ lambda x: slice(0, len(x)),
+ lambda x: range(len(x)),
+ lambda x: list(range(len(x))),
+ lambda x: np.ones(len(x), dtype=bool),
+ ],
+ ids=[
+ "index",
+ "list[index]",
+ "null_slice",
+ "full_slice",
+ "range",
+ "list(range)",
+ "mask",
+ ],
+ )
+ def full_indexer(self, request):
+ """
+ Fixture for an indexer to pass to obj.loc to get/set the full length of the
+ object.
+
+ In some cases, assumes that obj.index is the default RangeIndex.
+ """
+ return request.param
+
def test_setitem_scalar_series(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
@@ -305,30 +334,20 @@ def test_setitem_preserves_views(self, data):
assert view1[0] == data[1]
assert view2[0] == data[1]
- def test_setitem_dataframe_column_with_index(self, data):
+ def test_setitem_with_expansion_dataframe_column(self, data, full_indexer):
# https://github.com/pandas-dev/pandas/issues/32395
df = expected = pd.DataFrame({"data": pd.Series(data)})
result = pd.DataFrame(index=df.index)
- result.loc[df.index, "data"] = df["data"]
- self.assert_frame_equal(result, expected)
- def test_setitem_dataframe_column_without_index(self, data):
- # https://github.com/pandas-dev/pandas/issues/32395
- df = expected = pd.DataFrame({"data": pd.Series(data)})
- result = pd.DataFrame(index=df.index)
- result.loc[:, "data"] = df["data"]
+ key = full_indexer(df)
+ result.loc[key, "data"] = df["data"]
self.assert_frame_equal(result, expected)
- def test_setitem_series_with_index(self, data):
+ def test_setitem_series(self, data, full_indexer):
# https://github.com/pandas-dev/pandas/issues/32395
ser = expected = pd.Series(data, name="data")
result = pd.Series(index=ser.index, dtype=object, name="data")
- result.loc[ser.index] = ser
- self.assert_series_equal(result, expected)
- def test_setitem_series_without_index(self, data):
- # https://github.com/pandas-dev/pandas/issues/32395
- ser = expected = pd.Series(data, name="data")
- result = pd.Series(index=ser.index, dtype=object, name="data")
- result.loc[:] = ser
+ key = full_indexer(ser)
+ result.loc[key] = ser
self.assert_series_equal(result, expected)
| Preliminary to fixing some behavior that these hit | https://api.github.com/repos/pandas-dev/pandas/pulls/39034 | 2021-01-08T04:44:42Z | 2021-01-08T14:12:27Z | 2021-01-08T14:12:27Z | 2021-01-08T15:51:42Z |
Backport PR #39023 on branch 1.2.x (Fix regression in setitem when expanding DataFrame with specific column name format) | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index baeca87b8c4f8..4b7a4180ee9f9 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -20,6 +20,7 @@ Fixed regressions
- Fixed regression in repr of float-like strings of an ``object`` dtype having trailing 0's truncated after the decimal (:issue:`38708`)
- Fixed regression in :meth:`DataFrame.groupby()` with :class:`Categorical` grouping column not showing unused categories for ``grouped.indices`` (:issue:`38642`)
- Fixed regression in :meth:`DataFrame.any` and :meth:`DataFrame.all` not returning a result for tz-aware ``datetime64`` columns (:issue:`38723`)
+- Fixed regression in :meth:`DataFrame.__setitem__` raising ``ValueError`` when expanding :class:`DataFrame` and new column is from type ``"0 - name"`` (:issue:`39010`)
- Fixed regression in :meth:`.GroupBy.sem` where the presence of non-numeric columns would cause an error instead of being dropped (:issue:`38774`)
- Fixed regression in :func:`read_excel` with non-rawbyte file handles (:issue:`38788`)
- Bug in :meth:`read_csv` with ``float_precision="high"`` caused segfault or wrong parsing of long exponent strings. This resulted in a regression in some cases as the default for ``float_precision`` was changed in pandas 1.2.0 (:issue:`38753`)
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index d8b0ad739b056..73cf20979a8ad 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1382,7 +1382,7 @@ def is_bool_dtype(arr_or_dtype) -> bool:
return False
try:
dtype = get_dtype(arr_or_dtype)
- except TypeError:
+ except (TypeError, ValueError):
return False
if isinstance(arr_or_dtype, CategoricalDtype):
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index 19d80b714a674..128f505402eff 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -557,6 +557,11 @@ def test_is_bool_dtype():
assert com.is_bool_dtype("boolean")
+def test_is_bool_dtype_numpy_error():
+ # GH39010
+ assert not com.is_bool_dtype("0 - Name")
+
+
@pytest.mark.filterwarnings("ignore:'is_extension_type' is deprecated:FutureWarning")
@pytest.mark.parametrize(
"check_scipy", [False, pytest.param(True, marks=td.skip_if_no_scipy)]
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index 19d2f8301037a..cedef4784e4a1 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -356,6 +356,13 @@ def test_setitem_listlike_views(self):
expected = Series([100, 2, 3], name="a")
tm.assert_series_equal(ser, expected)
+ def test_setitem_string_column_numpy_dtype_raising(self):
+ # GH#39010
+ df = DataFrame([[1, 2], [3, 4]])
+ df["0 - Name"] = [5, 6]
+ expected = DataFrame([[1, 2, 5], [3, 4, 6]], columns=[0, 1, "0 - Name"])
+ tm.assert_frame_equal(df, expected)
+
class TestDataFrameSetItemSlicing:
def test_setitem_slice_position(self):
| Backport PR #39023: Fix regression in setitem when expanding DataFrame with specific column name format | https://api.github.com/repos/pandas-dev/pandas/pulls/39032 | 2021-01-08T00:20:10Z | 2021-01-08T01:14:21Z | 2021-01-08T01:14:21Z | 2021-01-08T01:14:21Z |
CLN: add typing to dtype arg in core/internals, core/reshape and core (GH38808) | diff --git a/pandas/core/base.py b/pandas/core/base.py
index afc22a8446dce..b603ba31f51dd 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -19,7 +19,7 @@
import numpy as np
import pandas._libs.lib as lib
-from pandas._typing import DtypeObj, IndexLabel
+from pandas._typing import Dtype, DtypeObj, IndexLabel
from pandas.compat import PYPY
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
@@ -500,7 +500,13 @@ def array(self) -> ExtensionArray:
"""
raise AbstractMethodError(self)
- def to_numpy(self, dtype=None, copy=False, na_value=lib.no_default, **kwargs):
+ def to_numpy(
+ self,
+ dtype: Optional[Dtype] = None,
+ copy: bool = False,
+ na_value=lib.no_default,
+ **kwargs,
+ ):
"""
A NumPy ndarray representing the values in this Series or Index.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index fe86bf3f582ca..2f4340c17c5a7 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -37,6 +37,8 @@
from pandas._typing import (
Axis,
CompressionOptions,
+ Dtype,
+ DtypeArg,
FilePathOrBuffer,
FrameOrSeries,
IndexKeyFunc,
@@ -44,6 +46,7 @@
JSONSerializable,
Label,
Level,
+ NpDtype,
Renamer,
StorageOptions,
TimedeltaConvertibleTypes,
@@ -210,7 +213,9 @@ def __init__(
object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True))
@classmethod
- def _init_mgr(cls, mgr, axes, dtype=None, copy: bool = False) -> BlockManager:
+ def _init_mgr(
+ cls, mgr, axes, dtype: Optional[Dtype] = None, copy: bool = False
+ ) -> BlockManager:
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
@@ -1901,7 +1906,7 @@ def empty(self) -> bool_t:
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
__array_priority__ = 1000
- def __array__(self, dtype=None) -> np.ndarray:
+ def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:
return np.asarray(self._values, dtype=dtype)
def __array_wrap__(
@@ -2642,7 +2647,7 @@ def to_sql(
index: bool_t = True,
index_label=None,
chunksize=None,
- dtype=None,
+ dtype: Optional[DtypeArg] = None,
method=None,
) -> None:
"""
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 94c7d325d0bc8..06ed64401a38f 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -15,7 +15,7 @@
)
from pandas._libs.internals import BlockPlacement
from pandas._libs.tslibs import conversion
-from pandas._typing import ArrayLike, DtypeObj, Scalar, Shape
+from pandas._typing import ArrayLike, Dtype, DtypeObj, Scalar, Shape
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
@@ -241,7 +241,7 @@ def array_values(self) -> ExtensionArray:
"""
return PandasArray(self.values)
- def get_values(self, dtype=None):
+ def get_values(self, dtype: Optional[Dtype] = None):
"""
return an internal format, currently just the ndarray
this is often overridden to handle to_dense like operations
@@ -1669,7 +1669,7 @@ def setitem(self, indexer, value):
self.values[indexer] = value
return self
- def get_values(self, dtype=None):
+ def get_values(self, dtype: Optional[Dtype] = None):
# ExtensionArrays must be iterable, so this works.
# TODO(EA2D): reshape not needed with 2D EAs
return np.asarray(self.values).reshape(self.shape)
@@ -1990,7 +1990,7 @@ class DatetimeLikeBlockMixin(Block):
_can_hold_na = True
- def get_values(self, dtype=None):
+ def get_values(self, dtype: Optional[Dtype] = None):
"""
return object dtype as boxed values, such as Timestamps/Timedelta
"""
@@ -2168,7 +2168,7 @@ def is_view(self) -> bool:
# check the ndarray values of the DatetimeIndex values
return self.values._data.base is not None
- def get_values(self, dtype=None):
+ def get_values(self, dtype: Optional[Dtype] = None):
"""
Returns an ndarray of values.
@@ -2449,7 +2449,7 @@ def replace(
# Constructor Helpers
-def get_block_type(values, dtype=None):
+def get_block_type(values, dtype: Optional[Dtype] = None):
"""
Find the appropriate Block subclass to use for the given values and dtype.
@@ -2464,7 +2464,7 @@ def get_block_type(values, dtype=None):
"""
# We use vtype and kind checks because they are much more performant
# than is_foo_dtype
- dtype = dtype or values.dtype
+ dtype = cast(np.dtype, pandas_dtype(dtype) if dtype else values.dtype)
vtype = dtype.type
kind = dtype.kind
@@ -2500,7 +2500,7 @@ def get_block_type(values, dtype=None):
return cls
-def make_block(values, placement, klass=None, ndim=None, dtype=None):
+def make_block(values, placement, klass=None, ndim=None, dtype: Optional[Dtype] = None):
# Ensure that we don't allow PandasArray / PandasDtype in internals.
# For now, blocks should be backed by ndarrays when possible.
if isinstance(values, ABCPandasArray):
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index d44a3df45587a..d27efd98ab079 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -17,7 +17,7 @@
import numpy as np
from pandas._libs import internals as libinternals, lib
-from pandas._typing import ArrayLike, DtypeObj, Label, Shape
+from pandas._typing import ArrayLike, Dtype, DtypeObj, Label, Shape
from pandas.errors import PerformanceWarning
from pandas.util._validators import validate_bool_kwarg
@@ -816,7 +816,7 @@ def copy_func(ax):
def as_array(
self,
transpose: bool = False,
- dtype=None,
+ dtype: Optional[Dtype] = None,
copy: bool = False,
na_value=lib.no_default,
) -> np.ndarray:
@@ -872,7 +872,9 @@ def as_array(
return arr.transpose() if transpose else arr
- def _interleave(self, dtype=None, na_value=lib.no_default) -> np.ndarray:
+ def _interleave(
+ self, dtype: Optional[Dtype] = None, na_value=lib.no_default
+ ) -> np.ndarray:
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
@@ -1842,7 +1844,7 @@ def _simple_blockify(tuples, dtype) -> List[Block]:
return [block]
-def _multi_blockify(tuples, dtype=None):
+def _multi_blockify(tuples, dtype: Optional[Dtype] = None):
""" return an array of blocks that potentially have different dtypes """
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[2].dtype)
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index d855886fb725f..fc8d2aee1e6cd 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -6,6 +6,7 @@
import pandas._libs.algos as libalgos
import pandas._libs.reshape as libreshape
from pandas._libs.sparse import IntIndex
+from pandas._typing import Dtype
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import maybe_promote
@@ -732,11 +733,11 @@ def get_dummies(
data,
prefix=None,
prefix_sep="_",
- dummy_na=False,
+ dummy_na: bool = False,
columns=None,
- sparse=False,
- drop_first=False,
- dtype=None,
+ sparse: bool = False,
+ drop_first: bool = False,
+ dtype: Optional[Dtype] = None,
) -> "DataFrame":
"""
Convert categorical variable into dummy/indicator variables.
@@ -921,7 +922,7 @@ def _get_dummies_1d(
dummy_na=False,
sparse=False,
drop_first=False,
- dtype=None,
+ dtype: Optional[Dtype] = None,
):
from pandas.core.reshape.concat import concat
diff --git a/pandas/core/series.py b/pandas/core/series.py
index f3f80677d0fe4..668cad4f64ac3 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -28,10 +28,12 @@
AggFuncType,
ArrayLike,
Axis,
+ Dtype,
DtypeObj,
FrameOrSeriesUnion,
IndexKeyFunc,
Label,
+ NpDtype,
StorageOptions,
ValueKeyFunc,
)
@@ -214,7 +216,13 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
# Constructors
def __init__(
- self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False
+ self,
+ data=None,
+ index=None,
+ dtype: Optional[Dtype] = None,
+ name=None,
+ copy: bool = False,
+ fastpath: bool = False,
):
if (
@@ -337,7 +345,7 @@ def __init__(
self.name = name
self._set_axis(0, index, fastpath=True)
- def _init_dict(self, data, index=None, dtype=None):
+ def _init_dict(self, data, index=None, dtype: Optional[Dtype] = None):
"""
Derive the "_mgr" and "index" attributes of a new Series from a
dictionary input.
@@ -612,7 +620,7 @@ def __len__(self) -> int:
"""
return len(self._mgr)
- def view(self, dtype=None) -> "Series":
+ def view(self, dtype: Optional[Dtype] = None) -> "Series":
"""
Create a new view of the Series.
@@ -686,7 +694,7 @@ def view(self, dtype=None) -> "Series":
# NDArray Compat
_HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)
- def __array__(self, dtype=None) -> np.ndarray:
+ def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:
"""
Return the values as a NumPy array.
| incremental PR for issue #38808
- [ ] closes #xxxx
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/39030 | 2021-01-07T23:30:44Z | 2021-01-08T23:09:18Z | 2021-01-08T23:09:18Z | 2021-01-08T23:09:19Z |
BUG: read_csv does not close file during an error in _make_reader | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index 849b599141c2b..1c8db4dd32393 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -40,7 +40,7 @@ Bug fixes
~~~~~~~~~
- Bug in :meth:`read_csv` with ``float_precision="high"`` caused segfault or wrong parsing of long exponent strings. This resulted in a regression in some cases as the default for ``float_precision`` was changed in pandas 1.2.0 (:issue:`38753`)
--
+- Bug in :func:`read_csv` not closing an opened file handle when a ``csv.Error`` or ``UnicodeDecodeError`` occurred while initializing (:issue:`39024`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index ca817be5d2ff6..e58e59a722b7a 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -2297,7 +2297,11 @@ def __init__(self, f: Union[FilePathOrBuffer, List], **kwds):
self._open_handles(f, kwds)
assert self.handles is not None
assert hasattr(self.handles.handle, "readline")
- self._make_reader(self.handles.handle)
+ try:
+ self._make_reader(self.handles.handle)
+ except (csv.Error, UnicodeDecodeError):
+ self.close()
+ raise
# Get columns in two steps: infer from data, then
# infer column indices from self.usecols if it is specified.
diff --git a/pandas/tests/io/parser/common/test_read_errors.py b/pandas/tests/io/parser/common/test_read_errors.py
index a2787ddad3683..57defb400b842 100644
--- a/pandas/tests/io/parser/common/test_read_errors.py
+++ b/pandas/tests/io/parser/common/test_read_errors.py
@@ -3,13 +3,17 @@
specific classification into the other test modules.
"""
import codecs
+import csv
from io import StringIO
import os
+from pathlib import Path
+import warnings
import numpy as np
import pytest
from pandas.errors import EmptyDataError, ParserError
+import pandas.util._test_decorators as td
from pandas import DataFrame
import pandas._testing as tm
@@ -208,3 +212,22 @@ def test_null_byte_char(all_parsers):
msg = "NULL byte detected"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), names=names)
+
+
+@td.check_file_leaks
+def test_open_file(all_parsers):
+ # GH 39024
+ parser = all_parsers
+ if parser.engine == "c":
+ pytest.skip()
+
+ with tm.ensure_clean() as path:
+ file = Path(path)
+ file.write_bytes(b"\xe4\na\n1")
+
+ # should not trigger a ResourceWarning
+ warnings.simplefilter("always", category=ResourceWarning)
+ with warnings.catch_warnings(record=True) as record:
+ with pytest.raises(csv.Error, match="Could not determine delimiter"):
+ parser.read_csv(file, sep=None)
+ assert len(record) == 0, record[0].message
| - [x] closes #39024
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
I don't understand why `td.check_file_leaks` doesn't complain about the left opened file (at least for me locally). I commented the close call on purpose to see whether the test fails at least for the CI.
@jbrockmendel I think you have debugged ResourceWarnings in the past. Do you know why the test doesn't fail? Even putting a `open("foo", mode="w")` in the test doesn't make it fail.
[The test case is different from #39024 but the symptoms are the same. Unless the except clause is narrowed down to specific exceptions, this PR will fix #39024] | https://api.github.com/repos/pandas-dev/pandas/pulls/39029 | 2021-01-07T22:14:19Z | 2021-01-13T18:03:45Z | 2021-01-13T18:03:45Z | 2021-01-14T11:05:23Z |
BUG: Resample.aggregate raising TypeError instead of SpecificationError with missing keys dtypes | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 886469837d184..61e747e1d5a53 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -306,6 +306,7 @@ Groupby/resample/rolling
- Bug in :meth:`.GroupBy.indices` would contain non-existent indices when null values were present in the groupby keys (:issue:`9304`)
- Fixed bug in :meth:`DataFrameGroupBy.sum` and :meth:`SeriesGroupBy.sum` causing loss of precision through using Kahan summation (:issue:`38778`)
- Fixed bug in :meth:`DataFrameGroupBy.cumsum`, :meth:`SeriesGroupBy.cumsum`, :meth:`DataFrameGroupBy.mean` and :meth:`SeriesGroupBy.mean` causing loss of precision through using Kahan summation (:issue:`38934`)
+- Bug in :meth:`.Resampler.aggregate` and :meth:`DataFrame.transform` raising ``TypeError`` instead of ``SpecificationError`` when missing keys having mixed dtypes (:issue:`39025`)
Reshaping
^^^^^^^^^
diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py
index c64f0bd71cf84..cd169a250b49b 100644
--- a/pandas/core/aggregation.py
+++ b/pandas/core/aggregation.py
@@ -35,6 +35,7 @@
from pandas.core.dtypes.common import is_dict_like, is_list_like
from pandas.core.dtypes.generic import ABCDataFrame, ABCNDFrame, ABCSeries
+from pandas.core.algorithms import safe_sort
from pandas.core.base import DataError, SpecificationError
import pandas.core.common as com
from pandas.core.indexes.api import Index
@@ -482,9 +483,10 @@ def transform_dict_like(
if obj.ndim != 1:
# Check for missing columns on a frame
- cols = sorted(set(func.keys()) - set(obj.columns))
+ cols = set(func.keys()) - set(obj.columns)
if len(cols) > 0:
- raise SpecificationError(f"Column(s) {cols} do not exist")
+ cols_sorted = list(safe_sort(list(cols)))
+ raise SpecificationError(f"Column(s) {cols_sorted} do not exist")
# Can't use func.values(); wouldn't work for a Series
if any(is_dict_like(v) for _, v in func.items()):
@@ -738,7 +740,11 @@ def agg_dict_like(
if isinstance(selected_obj, ABCDataFrame) and len(
selected_obj.columns.intersection(keys)
) != len(keys):
- cols = sorted(set(keys) - set(selected_obj.columns.intersection(keys)))
+ cols = list(
+ safe_sort(
+ list(set(keys) - set(selected_obj.columns.intersection(keys))),
+ )
+ )
raise SpecificationError(f"Column(s) {cols} do not exist")
from pandas.core.reshape.concat import concat
diff --git a/pandas/tests/frame/apply/test_frame_transform.py b/pandas/tests/frame/apply/test_frame_transform.py
index db5b2f3d86dfe..bff0306a50ee6 100644
--- a/pandas/tests/frame/apply/test_frame_transform.py
+++ b/pandas/tests/frame/apply/test_frame_transform.py
@@ -253,8 +253,24 @@ def f(x, a, b, c):
def test_transform_missing_columns(axis):
- # GH 35964
+ # GH#35964
df = DataFrame({"A": [1, 2], "B": [3, 4]})
match = re.escape("Column(s) ['C'] do not exist")
with pytest.raises(SpecificationError, match=match):
df.transform({"C": "cumsum"})
+
+
+def test_transform_none_to_type():
+ # GH#34377
+ df = DataFrame({"a": [None]})
+ msg = "Transform function failed"
+ with pytest.raises(ValueError, match=msg):
+ df.transform({"a": int})
+
+
+def test_transform_mixed_column_name_dtypes():
+ # GH39025
+ df = DataFrame({"a": ["1"]})
+ msg = r"Column\(s\) \[1, 'b'\] do not exist"
+ with pytest.raises(SpecificationError, match=msg):
+ df.transform({"a": int, 1: str, "b": int})
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index 2cd9bb70385bf..d217957cbe08a 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -297,6 +297,21 @@ def test_agg_consistency():
r.agg({"r1": "mean", "r2": "sum"})
+def test_agg_consistency_int_str_column_mix():
+ # GH#39025
+ df = DataFrame(
+ np.random.randn(1000, 2),
+ index=pd.date_range("1/1/2012", freq="S", periods=1000),
+ columns=[1, "a"],
+ )
+
+ r = df.resample("3T")
+
+ msg = r"Column\(s\) \[2, 'b'\] do not exist"
+ with pytest.raises(pd.core.base.SpecificationError, match=msg):
+ r.agg({2: "mean", "b": "sum"})
+
+
# TODO: once GH 14008 is fixed, move these tests into
# `Base` test class
diff --git a/pandas/tests/series/apply/test_series_transform.py b/pandas/tests/series/apply/test_series_transform.py
index 992aaa540a65f..73cc789c6eb3a 100644
--- a/pandas/tests/series/apply/test_series_transform.py
+++ b/pandas/tests/series/apply/test_series_transform.py
@@ -1,7 +1,7 @@
import numpy as np
import pytest
-from pandas import DataFrame, Series, concat
+from pandas import Series, concat
import pandas._testing as tm
from pandas.core.base import SpecificationError
from pandas.core.groupby.base import transformation_kernels
@@ -65,14 +65,6 @@ def test_transform_wont_agg(string_series):
string_series.transform(["sqrt", "max"])
-def test_transform_none_to_type():
- # GH34377
- df = DataFrame({"a": [None]})
- msg = "Transform function failed"
- with pytest.raises(ValueError, match=msg):
- df.transform({"a": int})
-
-
def test_transform_axis_1_raises():
# GH 35964
msg = "No axis named 1 for object type Series"
| - [x] closes #39025
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/39028 | 2021-01-07T21:58:05Z | 2021-01-08T23:17:59Z | 2021-01-08T23:17:59Z | 2021-01-08T23:18:38Z |
REF: make FreqGroup an Enum | diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx
index 70acb42712201..415bdf74db80a 100644
--- a/pandas/_libs/tslibs/dtypes.pyx
+++ b/pandas/_libs/tslibs/dtypes.pyx
@@ -23,7 +23,7 @@ cdef class PeriodDtypeBase:
return self._dtype_code == other._dtype_code
@property
- def freq_group(self) -> int:
+ def freq_group_code(self) -> int:
# See also: libperiod.get_freq_group
return (self._dtype_code // 1000) * 1000
@@ -37,7 +37,6 @@ cdef class PeriodDtypeBase:
from .offsets import to_offset
freqstr = _reverse_period_code_map.get(self._dtype_code)
- # equiv: freqstr = libfrequencies.get_freq_str(self._dtype_code)
return to_offset(freqstr)
@@ -134,7 +133,7 @@ cdef dict attrname_to_abbrevs = _attrname_to_abbrevs
cdef dict _abbrev_to_attrnames = {v: k for k, v in attrname_to_abbrevs.items()}
-class FreqGroup:
+class FreqGroup(Enum):
# Mirrors c_FreqGroup in the .pxd file
FR_ANN = 1000
FR_QTR = 2000
@@ -151,9 +150,10 @@ class FreqGroup:
FR_UND = -10000 # undefined
@staticmethod
- def get_freq_group(code: int) -> int:
- # See also: PeriodDtypeBase.freq_group
- return (code // 1000) * 1000
+ def get_freq_group(code: int) -> "FreqGroup":
+ # See also: PeriodDtypeBase.freq_group_code
+ code = (code // 1000) * 1000
+ return FreqGroup(code)
class Resolution(Enum):
@@ -178,8 +178,7 @@ class Resolution(Enum):
return self.value >= other.value
@property
- def freq_group(self):
- # TODO: annotate as returning FreqGroup once that is an enum
+ def freq_group(self) -> FreqGroup:
if self == Resolution.RESO_NS:
return FreqGroup.FR_NS
elif self == Resolution.RESO_US:
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index edcc1f29a5ec2..96a075dd21bf9 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -1068,11 +1068,11 @@ def _range_from_fields(
if quarter is not None:
if freq is None:
freq = to_offset("Q")
- base = FreqGroup.FR_QTR
+ base = FreqGroup.FR_QTR.value
else:
freq = to_offset(freq)
base = libperiod.freq_to_dtype_code(freq)
- if base != FreqGroup.FR_QTR:
+ if base != FreqGroup.FR_QTR.value:
raise AssertionError("base must equal FR_QTR")
freqstr = freq.freqstr
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 932868451058f..8609c61065327 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -571,7 +571,7 @@ def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
raise KeyError
grp = reso.freq_group
- per = Period(parsed, freq=grp)
+ per = Period(parsed, freq=grp.value)
start, end = per.start_time, per.end_time
# GH 24076
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 7762198246603..8fe92ed757401 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -506,8 +506,8 @@ def get_loc(self, key, method=None, tolerance=None):
raise KeyError(f"Cannot interpret '{key}' as period") from err
reso = Resolution.from_attrname(reso)
- grp = reso.freq_group
- freqn = self.dtype.freq_group
+ grp = reso.freq_group.value
+ freqn = self.dtype.freq_group_code
# _get_string_slice will handle cases where grp < freqn
assert grp >= freqn
@@ -580,15 +580,15 @@ def _maybe_cast_slice_bound(self, label, side: str, kind: str):
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
grp = reso.freq_group
- iv = Period(parsed, freq=grp)
+ iv = Period(parsed, freq=grp.value)
return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end"))
def _validate_partial_date_slice(self, reso: Resolution):
assert isinstance(reso, Resolution), (type(reso), reso)
grp = reso.freq_group
- freqn = self.dtype.freq_group
+ freqn = self.dtype.freq_group_code
- if not grp < freqn:
+ if not grp.value < freqn:
# TODO: we used to also check for
# reso in ["day", "hour", "minute", "second"]
# why is that check not needed?
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index 978010efd7ee5..3d2d69162c70a 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -510,28 +510,28 @@ def _daily_finder(vmin, vmax, freq: BaseOffset):
periodsperday = -1
- if dtype_code >= FreqGroup.FR_HR:
- if dtype_code == FreqGroup.FR_NS:
+ if dtype_code >= FreqGroup.FR_HR.value:
+ if dtype_code == FreqGroup.FR_NS.value:
periodsperday = 24 * 60 * 60 * 1000000000
- elif dtype_code == FreqGroup.FR_US:
+ elif dtype_code == FreqGroup.FR_US.value:
periodsperday = 24 * 60 * 60 * 1000000
- elif dtype_code == FreqGroup.FR_MS:
+ elif dtype_code == FreqGroup.FR_MS.value:
periodsperday = 24 * 60 * 60 * 1000
- elif dtype_code == FreqGroup.FR_SEC:
+ elif dtype_code == FreqGroup.FR_SEC.value:
periodsperday = 24 * 60 * 60
- elif dtype_code == FreqGroup.FR_MIN:
+ elif dtype_code == FreqGroup.FR_MIN.value:
periodsperday = 24 * 60
- elif dtype_code == FreqGroup.FR_HR:
+ elif dtype_code == FreqGroup.FR_HR.value:
periodsperday = 24
else: # pragma: no cover
raise ValueError(f"unexpected frequency: {dtype_code}")
periodsperyear = 365 * periodsperday
periodspermonth = 28 * periodsperday
- elif dtype_code == FreqGroup.FR_BUS:
+ elif dtype_code == FreqGroup.FR_BUS.value:
periodsperyear = 261
periodspermonth = 19
- elif dtype_code == FreqGroup.FR_DAY:
+ elif dtype_code == FreqGroup.FR_DAY.value:
periodsperyear = 365
periodspermonth = 28
elif FreqGroup.get_freq_group(dtype_code) == FreqGroup.FR_WK:
@@ -661,7 +661,7 @@ def _second_finder(label_interval):
elif span <= periodsperyear // 4:
month_start = period_break(dates_, "month")
info_maj[month_start] = True
- if dtype_code < FreqGroup.FR_HR:
+ if dtype_code < FreqGroup.FR_HR.value:
info["min"] = True
else:
day_start = period_break(dates_, "day")
@@ -872,14 +872,15 @@ def _annual_finder(vmin, vmax, freq):
def get_finder(freq: BaseOffset):
dtype_code = freq._period_dtype_code
fgroup = (dtype_code // 1000) * 1000
+ fgroup = FreqGroup(fgroup)
if fgroup == FreqGroup.FR_ANN:
return _annual_finder
elif fgroup == FreqGroup.FR_QTR:
return _quarterly_finder
- elif dtype_code == FreqGroup.FR_MTH:
+ elif dtype_code == FreqGroup.FR_MTH.value:
return _monthly_finder
- elif (dtype_code >= FreqGroup.FR_BUS) or fgroup == FreqGroup.FR_WK:
+ elif (dtype_code >= FreqGroup.FR_BUS.value) or fgroup == FreqGroup.FR_WK:
return _daily_finder
else: # pragma: no cover
raise NotImplementedError(f"Unsupported frequency: {dtype_code}")
diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py
index ae4fff7b495d0..e04b03e5b0420 100644
--- a/pandas/plotting/_matplotlib/timeseries.py
+++ b/pandas/plotting/_matplotlib/timeseries.py
@@ -215,7 +215,7 @@ def use_dynamic_x(ax: "Axes", data: FrameOrSeriesUnion) -> bool:
if isinstance(data.index, ABCDatetimeIndex):
base = to_offset(freq)._period_dtype_code
x = data.index
- if base <= FreqGroup.FR_DAY:
+ if base <= FreqGroup.FR_DAY.value:
return x[:1].is_normalized
return Period(x[0], freq).to_timestamp().tz_localize(x.tz) == x[0]
return True
| https://api.github.com/repos/pandas-dev/pandas/pulls/39027 | 2021-01-07T21:44:54Z | 2021-01-08T14:11:22Z | 2021-01-08T14:11:22Z | 2021-01-08T15:53:46Z | |
TYP/CLN: Use futures annotations in apply | diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index edb6b97a73e7f..ac98f3736be6d 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import abc
import inspect
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Tuple, Type, cast
@@ -33,7 +35,7 @@
def frame_apply(
- obj: "DataFrame",
+ obj: DataFrame,
how: str,
func: AggFuncType,
axis: Axis = 0,
@@ -69,22 +71,22 @@ class FrameApply(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
- def result_index(self) -> "Index":
+ def result_index(self) -> Index:
pass
@property
@abc.abstractmethod
- def result_columns(self) -> "Index":
+ def result_columns(self) -> Index:
pass
@property
@abc.abstractmethod
- def series_generator(self) -> Iterator["Series"]:
+ def series_generator(self) -> Iterator[Series]:
pass
@abc.abstractmethod
def wrap_results_for_axis(
- self, results: ResType, res_index: "Index"
+ self, results: ResType, res_index: Index
) -> FrameOrSeriesUnion:
pass
@@ -92,7 +94,7 @@ def wrap_results_for_axis(
def __init__(
self,
- obj: "DataFrame",
+ obj: DataFrame,
how: str,
func,
raw: bool,
@@ -131,15 +133,15 @@ def f(x):
self.f: AggFuncType = f
@property
- def res_columns(self) -> "Index":
+ def res_columns(self) -> Index:
return self.result_columns
@property
- def columns(self) -> "Index":
+ def columns(self) -> Index:
return self.obj.columns
@property
- def index(self) -> "Index":
+ def index(self) -> Index:
return self.obj.index
@cache_readonly
@@ -147,11 +149,11 @@ def values(self):
return self.obj.values
@cache_readonly
- def dtypes(self) -> "Series":
+ def dtypes(self) -> Series:
return self.obj.dtypes
@property
- def agg_axis(self) -> "Index":
+ def agg_axis(self) -> Index:
return self.obj._get_agg_axis(self.axis)
def get_result(self):
@@ -311,7 +313,7 @@ def wrapper(*args, **kwargs):
else:
return self.obj._constructor_sliced(result, index=self.agg_axis)
- def apply_broadcast(self, target: "DataFrame") -> "DataFrame":
+ def apply_broadcast(self, target: DataFrame) -> DataFrame:
assert callable(self.f)
result_values = np.empty_like(target.values)
@@ -346,7 +348,7 @@ def apply_standard(self):
# wrap results
return self.wrap_results(results, res_index)
- def apply_series_generator(self) -> Tuple[ResType, "Index"]:
+ def apply_series_generator(self) -> Tuple[ResType, Index]:
assert callable(self.f)
series_gen = self.series_generator
@@ -365,7 +367,7 @@ def apply_series_generator(self) -> Tuple[ResType, "Index"]:
return results, res_index
- def wrap_results(self, results: ResType, res_index: "Index") -> FrameOrSeriesUnion:
+ def wrap_results(self, results: ResType, res_index: Index) -> FrameOrSeriesUnion:
from pandas import Series
# see if we can infer the results
@@ -392,7 +394,7 @@ def wrap_results(self, results: ResType, res_index: "Index") -> FrameOrSeriesUni
class FrameRowApply(FrameApply):
axis = 0
- def apply_broadcast(self, target: "DataFrame") -> "DataFrame":
+ def apply_broadcast(self, target: DataFrame) -> DataFrame:
return super().apply_broadcast(target)
@property
@@ -400,15 +402,15 @@ def series_generator(self):
return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))
@property
- def result_index(self) -> "Index":
+ def result_index(self) -> Index:
return self.columns
@property
- def result_columns(self) -> "Index":
+ def result_columns(self) -> Index:
return self.index
def wrap_results_for_axis(
- self, results: ResType, res_index: "Index"
+ self, results: ResType, res_index: Index
) -> FrameOrSeriesUnion:
""" return the results for the rows """
@@ -452,7 +454,7 @@ def wrap_results_for_axis(
class FrameColumnApply(FrameApply):
axis = 1
- def apply_broadcast(self, target: "DataFrame") -> "DataFrame":
+ def apply_broadcast(self, target: DataFrame) -> DataFrame:
result = super().apply_broadcast(target.T)
return result.T
@@ -483,15 +485,15 @@ def series_generator(self):
yield ser
@property
- def result_index(self) -> "Index":
+ def result_index(self) -> Index:
return self.index
@property
- def result_columns(self) -> "Index":
+ def result_columns(self) -> Index:
return self.columns
def wrap_results_for_axis(
- self, results: ResType, res_index: "Index"
+ self, results: ResType, res_index: Index
) -> FrameOrSeriesUnion:
""" return the results for the columns """
result: FrameOrSeriesUnion
@@ -511,7 +513,7 @@ def wrap_results_for_axis(
return result
- def infer_to_same_shape(self, results: ResType, res_index: "Index") -> "DataFrame":
+ def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame:
""" infer the results to the same shape as the input object """
result = self.obj._constructor(data=results)
result = result.T
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/39026 | 2021-01-07T21:39:52Z | 2021-01-08T13:09:00Z | 2021-01-08T13:09:00Z | 2021-01-09T01:57:48Z |
Fix regression in setitem when expanding DataFrame with specific column name format | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index 5695c817b5a3a..39e5b67fbbc37 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -20,6 +20,7 @@ Fixed regressions
- Fixed regression in repr of float-like strings of an ``object`` dtype having trailing 0's truncated after the decimal (:issue:`38708`)
- Fixed regression in :meth:`DataFrame.groupby()` with :class:`Categorical` grouping column not showing unused categories for ``grouped.indices`` (:issue:`38642`)
- Fixed regression in :meth:`DataFrame.any` and :meth:`DataFrame.all` not returning a result for tz-aware ``datetime64`` columns (:issue:`38723`)
+- Fixed regression in :meth:`DataFrame.__setitem__` raising ``ValueError`` when expanding :class:`DataFrame` and new column is from type ``"0 - name"`` (:issue:`39010`)
- Fixed regression in :meth:`.GroupBy.sem` where the presence of non-numeric columns would cause an error instead of being dropped (:issue:`38774`)
- Fixed regression in :func:`read_excel` with non-rawbyte file handles (:issue:`38788`)
- Bug in :meth:`read_csv` with ``float_precision="high"`` caused segfault or wrong parsing of long exponent strings. This resulted in a regression in some cases as the default for ``float_precision`` was changed in pandas 1.2.0 (:issue:`38753`)
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 1993c41db03f8..9861a466b2d2f 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1395,7 +1395,7 @@ def is_bool_dtype(arr_or_dtype) -> bool:
return False
try:
dtype = get_dtype(arr_or_dtype)
- except TypeError:
+ except (TypeError, ValueError):
return False
if isinstance(arr_or_dtype, CategoricalDtype):
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index 8df61394e8e7e..a5522e503c7f4 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -557,6 +557,11 @@ def test_is_bool_dtype():
assert com.is_bool_dtype("boolean")
+def test_is_bool_dtype_numpy_error():
+ # GH39010
+ assert not com.is_bool_dtype("0 - Name")
+
+
@pytest.mark.filterwarnings("ignore:'is_extension_type' is deprecated:FutureWarning")
@pytest.mark.parametrize(
"check_scipy", [False, pytest.param(True, marks=td.skip_if_no_scipy)]
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index 28b1f02ff020c..a838b09b39be6 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -366,6 +366,13 @@ def test_setitem_listlike_views(self):
expected = Series([100, 2, 3], name="a")
tm.assert_series_equal(ser, expected)
+ def test_setitem_string_column_numpy_dtype_raising(self):
+ # GH#39010
+ df = DataFrame([[1, 2], [3, 4]])
+ df["0 - Name"] = [5, 6]
+ expected = DataFrame([[1, 2, 5], [3, 4, 6]], columns=[0, 1, "0 - Name"])
+ tm.assert_frame_equal(df, expected)
+
class TestDataFrameSetItemSlicing:
def test_setitem_slice_position(self):
| - [x] closes #39010
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
We obviously want cast to object if value is string. | https://api.github.com/repos/pandas-dev/pandas/pulls/39023 | 2021-01-07T19:30:03Z | 2021-01-08T00:19:44Z | 2021-01-08T00:19:44Z | 2021-01-08T08:41:05Z |
Deprecate DataFrame indexer for iloc setitem and getitem | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 41db72612a66b..c5ff5265f6798 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -128,6 +128,7 @@ Other enhancements
- :meth:`.Rolling.sum`, :meth:`.Expanding.sum`, :meth:`.Rolling.mean`, :meth:`.Expanding.mean`, :meth:`.Rolling.median`, :meth:`.Expanding.median`, :meth:`.Rolling.max`, :meth:`.Expanding.max`, :meth:`.Rolling.min`, and :meth:`.Expanding.min` now support ``Numba`` execution with the ``engine`` keyword (:issue:`38895`)
- :meth:`DataFrame.apply` can now accept NumPy unary operators as strings, e.g. ``df.apply("sqrt")``, which was already the case for :meth:`Series.apply` (:issue:`39116`)
- :meth:`DataFrame.apply` can now accept non-callable DataFrame properties as strings, e.g. ``df.apply("size")``, which was already the case for :meth:`Series.apply` (:issue:`39116`)
+- Disallow :class:`DataFrame` indexer for ``iloc`` for :meth:`Series.__getitem__` and :meth:`DataFrame.__getitem__`, (:issue:`39004`)
- :meth:`Series.apply` can now accept list-like or dictionary-like arguments that aren't lists or dictionaries, e.g. ``ser.apply(np.array(["sum", "mean"]))``, which was already the case for :meth:`DataFrame.apply` (:issue:`39140`)
- :meth:`DataFrame.plot.scatter` can now accept a categorical column as the argument to ``c`` (:issue:`12380`, :issue:`31357`)
- :meth:`.Styler.set_tooltips` allows on hover tooltips to be added to styled HTML dataframes (:issue:`35643`, :issue:`21266`, :issue:`39317`)
@@ -318,6 +319,7 @@ Deprecations
- Deprecated comparison of :class:`Timestamp` object with ``datetime.date`` objects. Instead of e.g. ``ts <= mydate`` use ``ts <= pd.Timestamp(mydate)`` or ``ts.date() <= mydate`` (:issue:`36131`)
- Deprecated :attr:`Rolling.win_type` returning ``"freq"`` (:issue:`38963`)
- Deprecated :attr:`Rolling.is_datetimelike` (:issue:`38963`)
+- Deprecated :class:`DataFrame` indexer for :meth:`Series.__setitem__` and :meth:`DataFrame.__setitem__` (:issue:`39004`)
- Deprecated :meth:`core.window.ewm.ExponentialMovingWindow.vol` (:issue:`39220`)
- Using ``.astype`` to convert between ``datetime64[ns]`` dtype and :class:`DatetimeTZDtype` is deprecated and will raise in a future version, use ``obj.tz_localize`` or ``obj.dt.tz_localize`` instead (:issue:`38622`)
- Deprecated casting ``datetime.date`` objects to ``datetime64`` when used as ``fill_value`` in :meth:`DataFrame.unstack`, :meth:`DataFrame.shift`, :meth:`Series.shift`, and :meth:`DataFrame.reindex`, pass ``pd.Timestamp(dateobj)`` instead (:issue:`39767`)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index cfe16627d5c64..e322cb23eba95 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1421,6 +1421,15 @@ def _has_valid_setitem_indexer(self, indexer) -> bool:
if isinstance(indexer, dict):
raise IndexError("iloc cannot enlarge its target object")
+ if isinstance(indexer, ABCDataFrame):
+ warnings.warn(
+ "DataFrame indexer for .iloc is deprecated and will be removed in"
+ "a future version.\n"
+ "consider using .loc with a DataFrame indexer for automatic alignment.",
+ FutureWarning,
+ stacklevel=3,
+ )
+
if not isinstance(indexer, tuple):
indexer = _tuplify(self.ndim, indexer)
@@ -1508,6 +1517,12 @@ def _get_list_axis(self, key, axis: int):
raise IndexError("positional indexers are out-of-bounds") from err
def _getitem_axis(self, key, axis: int):
+ if isinstance(key, ABCDataFrame):
+ raise IndexError(
+ "DataFrame indexer is not allowed for .iloc\n"
+ "Consider using .loc for automatic alignment."
+ )
+
if isinstance(key, slice):
return self._get_slice_axis(key, axis=axis)
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 43ffc9e8eaedd..d0fdf81121c71 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -1090,6 +1090,20 @@ def test_iloc_getitem_setitem_fancy_exceptions(self, float_frame):
# GH#32257 we let numpy do validation, get their exception
float_frame.iloc[:, :, :] = 1
+ def test_iloc_frame_indexer(self):
+ # GH#39004
+ df = DataFrame({"a": [1, 2, 3]})
+ indexer = DataFrame({"a": [True, False, True]})
+ with tm.assert_produces_warning(FutureWarning):
+ df.iloc[indexer] = 1
+
+ msg = (
+ "DataFrame indexer is not allowed for .iloc\n"
+ "Consider using .loc for automatic alignment."
+ )
+ with pytest.raises(IndexError, match=msg):
+ df.iloc[indexer]
+
class TestILocSetItemDuplicateColumns:
def test_iloc_setitem_scalar_duplicate_columns(self):
| - [x] closes #39004
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/39022 | 2021-01-07T19:05:34Z | 2021-03-02T23:22:00Z | 2021-03-02T23:21:59Z | 2021-03-04T21:59:03Z |
Backport PR #38997 on branch 1.2.x (REGR: errors='replace' when encoding/errors are not specified) | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index 5695c817b5a3a..baeca87b8c4f8 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -24,6 +24,7 @@ Fixed regressions
- Fixed regression in :func:`read_excel` with non-rawbyte file handles (:issue:`38788`)
- Bug in :meth:`read_csv` with ``float_precision="high"`` caused segfault or wrong parsing of long exponent strings. This resulted in a regression in some cases as the default for ``float_precision`` was changed in pandas 1.2.0 (:issue:`38753`)
- Fixed regression in :meth:`Rolling.skew` and :meth:`Rolling.kurt` modifying the object inplace (:issue:`38908`)
+- Fixed regression in :meth:`read_csv` and other read functions were the encoding error policy (``errors``) did not default to ``"replace"`` when no encoding was specified (:issue:`38989`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/common.py b/pandas/io/common.py
index c189c3046b4f3..e838e10a27d21 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -547,8 +547,7 @@ def get_handle(
Returns the dataclass IOHandles
"""
# Windows does not default to utf-8. Set to utf-8 for a consistent behavior
- if encoding is None:
- encoding = "utf-8"
+ encoding_passed, encoding = encoding, encoding or "utf-8"
# read_csv does not know whether the buffer is opened in binary/text mode
if _is_binary_mode(path_or_buf, mode) and "b" not in mode:
@@ -635,6 +634,9 @@ def get_handle(
# Check whether the filename is to be opened in binary mode.
# Binary mode does not support 'encoding' and 'newline'.
if ioargs.encoding and "b" not in ioargs.mode:
+ if errors is None and encoding_passed is None:
+ # ignore errors when no encoding is specified
+ errors = "replace"
# Encoding
handle = open(
handle,
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index 34cb00e89ea0c..2ea944d9502b3 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -418,3 +418,11 @@ def test_is_fsspec_url():
assert not icom.is_fsspec_url("random:pandas/somethingelse.com")
assert not icom.is_fsspec_url("/local/path")
assert not icom.is_fsspec_url("relative/local/path")
+
+
+def test_default_errors():
+ # GH 38989
+ with tm.ensure_clean() as path:
+ file = Path(path)
+ file.write_bytes(b"\xe4\na\n1")
+ tm.assert_frame_equal(pd.read_csv(file, skiprows=[0]), pd.DataFrame({"a": [1]}))
| Backport PR #38997: REGR: errors='replace' when encoding/errors are not specified | https://api.github.com/repos/pandas-dev/pandas/pulls/39021 | 2021-01-07T18:49:07Z | 2021-01-07T21:23:20Z | 2021-01-07T21:23:20Z | 2021-01-07T21:23:21Z |
Backport PR #39019 on branch 1.2.x (DOC: np.bool -> np.bool_) | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 717334bfe1299..90d65327ea980 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -144,6 +144,11 @@ repos:
\#\ type:\s?ignore(?!\[)
language: pygrep
types: [python]
+ - id: np-bool
+ name: Check for use of np.bool instead of np.bool_
+ entry: np\.bool[^_8]
+ language: pygrep
+ types_or: [python, cython, rst]
- id: no-os-remove
name: Check code for instances of os.remove
entry: os\.remove
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index 6ce63ff8badca..6cc8e15786795 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -625,7 +625,7 @@ class TransformBools:
def setup(self):
N = 120000
transition_points = np.sort(np.random.choice(np.arange(N), 1400))
- transitions = np.zeros(N, dtype=np.bool)
+ transitions = np.zeros(N, dtype=np.bool_)
transitions[transition_points] = True
self.g = transitions.cumsum()
self.df = DataFrame({"signal": np.random.rand(N)})
diff --git a/doc/source/user_guide/basics.rst b/doc/source/user_guide/basics.rst
index ffecaa222e1f9..8d38c12252df4 100644
--- a/doc/source/user_guide/basics.rst
+++ b/doc/source/user_guide/basics.rst
@@ -2229,7 +2229,7 @@ Convert certain columns to a specific dtype by passing a dict to :meth:`~DataFra
.. ipython:: python
dft1 = pd.DataFrame({"a": [1, 0, 1], "b": [4, 5, 6], "c": [7, 8, 9]})
- dft1 = dft1.astype({"a": np.bool, "c": np.float64})
+ dft1 = dft1.astype({"a": np.bool_, "c": np.float64})
dft1
dft1.dtypes
diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst
index 5a6f56388dee5..77791b4b7e491 100644
--- a/doc/source/user_guide/cookbook.rst
+++ b/doc/source/user_guide/cookbook.rst
@@ -1406,7 +1406,7 @@ Often it's useful to obtain the lower (or upper) triangular form of a correlatio
df = pd.DataFrame(np.random.random(size=(100, 5)))
corr_mat = df.corr()
- mask = np.tril(np.ones_like(corr_mat, dtype=np.bool), k=-1)
+ mask = np.tril(np.ones_like(corr_mat, dtype=np.bool_), k=-1)
corr_mat.where(mask)
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index 197738330efe1..e67769bc774b0 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -492,7 +492,7 @@ def test_float_types(self, np_type, path):
@pytest.mark.parametrize("np_type", [np.bool8, np.bool_])
def test_bool_types(self, np_type, path):
- # Test np.bool values read come back as float.
+ # Test np.bool8 and np.bool_ values read come back as float.
df = DataFrame([1, 0, True, False], dtype=np_type)
df.to_excel(path, "test1")
| Backport PR #39019: DOC: np.bool -> np.bool_ | https://api.github.com/repos/pandas-dev/pandas/pulls/39020 | 2021-01-07T18:48:43Z | 2021-01-07T21:23:37Z | 2021-01-07T21:23:37Z | 2021-01-07T21:23:37Z |
DOC: np.bool -> np.bool_ | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 52f923c41cbd4..9601be40fdebb 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -152,6 +152,11 @@ repos:
\#\ type:\s?ignore(?!\[)
language: pygrep
types: [python]
+ - id: np-bool
+ name: Check for use of np.bool instead of np.bool_
+ entry: np\.bool[^_8]
+ language: pygrep
+ types_or: [python, cython, rst]
- id: no-os-remove
name: Check code for instances of os.remove
entry: os\.remove
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index b4d9db95af163..806cf38ad90b6 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -627,7 +627,7 @@ class TransformBools:
def setup(self):
N = 120000
transition_points = np.sort(np.random.choice(np.arange(N), 1400))
- transitions = np.zeros(N, dtype=np.bool)
+ transitions = np.zeros(N, dtype=np.bool_)
transitions[transition_points] = True
self.g = transitions.cumsum()
self.df = DataFrame({"signal": np.random.rand(N)})
diff --git a/doc/source/user_guide/basics.rst b/doc/source/user_guide/basics.rst
index ffecaa222e1f9..8d38c12252df4 100644
--- a/doc/source/user_guide/basics.rst
+++ b/doc/source/user_guide/basics.rst
@@ -2229,7 +2229,7 @@ Convert certain columns to a specific dtype by passing a dict to :meth:`~DataFra
.. ipython:: python
dft1 = pd.DataFrame({"a": [1, 0, 1], "b": [4, 5, 6], "c": [7, 8, 9]})
- dft1 = dft1.astype({"a": np.bool, "c": np.float64})
+ dft1 = dft1.astype({"a": np.bool_, "c": np.float64})
dft1
dft1.dtypes
diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst
index 92905836b763c..66b564838e5e2 100644
--- a/doc/source/user_guide/cookbook.rst
+++ b/doc/source/user_guide/cookbook.rst
@@ -1406,7 +1406,7 @@ Often it's useful to obtain the lower (or upper) triangular form of a correlatio
df = pd.DataFrame(np.random.random(size=(100, 5)))
corr_mat = df.corr()
- mask = np.tril(np.ones_like(corr_mat, dtype=np.bool), k=-1)
+ mask = np.tril(np.ones_like(corr_mat, dtype=np.bool_), k=-1)
corr_mat.where(mask)
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index c930acd179330..b12413fbb56c6 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -492,7 +492,7 @@ def test_float_types(self, np_type, path):
@pytest.mark.parametrize("np_type", [np.bool8, np.bool_])
def test_bool_types(self, np_type, path):
- # Test np.bool values read come back as float.
+ # Test np.bool8 and np.bool_ values read come back as float.
df = DataFrame([1, 0, True, False], dtype=np_type)
df.to_excel(path, "test1")
| xref #34848, #34835
a couple missed that are causing doc build failures with numpy-1.20.0rc2, https://github.com/pandas-dev/pandas/pull/36092/checks?check_run_id=1662818738
probably want to backport this to prevent possible future ci failures | https://api.github.com/repos/pandas-dev/pandas/pulls/39019 | 2021-01-07T15:52:25Z | 2021-01-07T18:47:08Z | 2021-01-07T18:47:08Z | 2021-01-07T18:59:32Z |
CLN: add typing to dtype arg in core/common.py (GH38808) | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 622d903b03579..a6514b5167460 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -8,13 +8,13 @@
import contextlib
from functools import partial
import inspect
-from typing import Any, Collection, Iterable, Iterator, List, Union, cast
+from typing import Any, Collection, Iterable, Iterator, List, Optional, Union, cast
import warnings
import numpy as np
from pandas._libs import lib
-from pandas._typing import AnyArrayLike, Scalar, T
+from pandas._typing import AnyArrayLike, NpDtype, Scalar, T
from pandas.compat.numpy import np_version_under1p18
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
@@ -195,7 +195,7 @@ def count_not_none(*args) -> int:
return sum(x is not None for x in args)
-def asarray_tuplesafe(values, dtype=None):
+def asarray_tuplesafe(values, dtype: Optional[NpDtype] = None) -> np.ndarray:
if not (isinstance(values, (list, tuple)) or hasattr(values, "__array__")):
values = list(values)
@@ -218,7 +218,7 @@ def asarray_tuplesafe(values, dtype=None):
return result
-def index_labels_to_array(labels, dtype=None):
+def index_labels_to_array(labels, dtype: Optional[NpDtype] = None) -> np.ndarray:
"""
Transform label or iterable of labels to array, for use in Index.
| Follow the issue - https://github.com/pandas-dev/pandas/issues/38808
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/39018 | 2021-01-07T12:33:52Z | 2021-01-07T14:07:40Z | 2021-01-07T14:07:40Z | 2021-01-07T14:07:44Z |
DOC: Add whatsnew | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 9e557a0020f1e..3efb620f6ca65 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -201,7 +201,7 @@ Datetimelike
- Bug in constructing a :class:`Series` or :class:`DataFrame` with a ``datetime`` object out of bounds for ``datetime64[ns]`` dtype or a ``timedelta`` object ouf of bounds for ``timedelta64[ns]`` dtype (:issue:`38792`, :issue:`38965`)
- Bug in :meth:`DatetimeIndex.intersection`, :meth:`DatetimeIndex.symmetric_difference`, :meth:`PeriodIndex.intersection`, :meth:`PeriodIndex.symmetric_difference` always returning object-dtype when operating with :class:`CategoricalIndex` (:issue:`38741`)
- Bug in :meth:`Series.where` incorrectly casting ``datetime64`` values to ``int64`` (:issue:`37682`)
--
+- Bug in :class:`Categorical` incorrectly typecasting ``datetime`` object to ``Timestamp`` (:issue:`38878`)
Timedelta
^^^^^^^^^
| - [x] closes #38878
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/39014 | 2021-01-07T08:58:40Z | 2021-01-08T14:17:28Z | 2021-01-08T14:17:27Z | 2021-01-10T11:00:28Z |
BUG: read_csv raising ValueError for tru_values/false_values and boolean dtype | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 886469837d184..6f6b6743d8289 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -284,6 +284,7 @@ I/O
- Bug in :func:`json_normalize` resulting in the first element of a generator object not being included in the returned ``DataFrame`` (:issue:`35923`)
- Bug in :func:`read_excel` forward filling :class:`MultiIndex` names with multiple header and index columns specified (:issue:`34673`)
- :func:`pandas.read_excel` now respects :func:``pandas.set_option`` (:issue:`34252`)
+- Bug in :func:`read_csv` not switching ``true_values`` and ``false_values`` for nullable ``boolean`` dtype (:issue:`34655`)
- Bug in :func:``read_json`` when ``orient="split"`` does not maintan numeric string index (:issue:`28556`)
Period
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 4995252d7aafd..a72a2ff8eaf28 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -1084,11 +1084,18 @@ cdef class TextReader:
elif is_extension_array_dtype(dtype):
result, na_count = self._string_convert(i, start, end, na_filter,
na_hashset)
+
array_type = dtype.construct_array_type()
try:
# use _from_sequence_of_strings if the class defines it
- result = array_type._from_sequence_of_strings(result,
- dtype=dtype)
+ if is_bool_dtype(dtype):
+ true_values = [x.decode() for x in self.true_values]
+ false_values = [x.decode() for x in self.false_values]
+ result = array_type._from_sequence_of_strings(
+ result, dtype=dtype, true_values=true_values,
+ false_values=false_values)
+ else:
+ result = array_type._from_sequence_of_strings(result, dtype=dtype)
except NotImplementedError:
raise NotImplementedError(
f"Extension Array: {array_type} must implement "
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py
index bbbc0911b4846..2bc908186f7f4 100644
--- a/pandas/core/arrays/boolean.py
+++ b/pandas/core/arrays/boolean.py
@@ -257,6 +257,8 @@ class BooleanArray(BaseMaskedArray):
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = False
+ _TRUE_VALUES = {"True", "TRUE", "true", "1", "1.0"}
+ _FALSE_VALUES = {"False", "FALSE", "false", "0", "0.0"}
def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
if not (isinstance(values, np.ndarray) and values.dtype == np.bool_):
@@ -282,14 +284,23 @@ def _from_sequence(
@classmethod
def _from_sequence_of_strings(
- cls, strings: List[str], *, dtype: Optional[Dtype] = None, copy: bool = False
+ cls,
+ strings: List[str],
+ *,
+ dtype: Optional[Dtype] = None,
+ copy: bool = False,
+ true_values: Optional[List[str]] = None,
+ false_values: Optional[List[str]] = None,
) -> "BooleanArray":
+ true_values_union = cls._TRUE_VALUES.union(true_values or [])
+ false_values_union = cls._FALSE_VALUES.union(false_values or [])
+
def map_string(s):
if isna(s):
return s
- elif s in ["True", "TRUE", "true", "1", "1.0"]:
+ elif s in true_values_union:
return True
- elif s in ["False", "FALSE", "false", "0", "0.0"]:
+ elif s in false_values_union:
return False
else:
raise ValueError(f"{s} cannot be cast to bool")
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 6e9cc18358153..ca817be5d2ff6 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1812,7 +1812,15 @@ def _cast_types(self, values, cast_type, column):
cast_type = pandas_dtype(cast_type)
array_type = cast_type.construct_array_type()
try:
- return array_type._from_sequence_of_strings(values, dtype=cast_type)
+ if is_bool_dtype(cast_type):
+ return array_type._from_sequence_of_strings(
+ values,
+ dtype=cast_type,
+ true_values=self.true_values,
+ false_values=self.false_values,
+ )
+ else:
+ return array_type._from_sequence_of_strings(values, dtype=cast_type)
except NotImplementedError as err:
raise NotImplementedError(
f"Extension Array: {array_type} must implement "
diff --git a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
index ec1ccf009b8de..5ffd909d316bf 100644
--- a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
+++ b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
@@ -213,3 +213,25 @@ def decimal_number_check(parser, numeric_decimal, thousands, float_precision):
)
val = df.iloc[0, 0]
assert val == numeric_decimal[1]
+
+
+def test_true_values_cast_to_bool(all_parsers):
+ # GH#34655
+ text = """a,b
+yes,xxx
+no,yyy
+1,zzz
+0,aaa
+ """
+ parser = all_parsers
+ result = parser.read_csv(
+ StringIO(text),
+ true_values=["yes"],
+ false_values=["no"],
+ dtype={"a": "boolean"},
+ )
+ expected = DataFrame(
+ {"a": [True, False, True, False], "b": ["xxx", "yyy", "zzz", "aaa"]}
+ )
+ expected["a"] = expected["a"].astype("boolean")
+ tm.assert_frame_equal(result, expected)
| - [x] closes #34655
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
I am not really experienced with cython, so I would appreciate feedback on the switiching function. This was not done previously in case of ea boolean dtype, hence why this was failing before. | https://api.github.com/repos/pandas-dev/pandas/pulls/39012 | 2021-01-07T00:06:13Z | 2021-01-09T22:18:23Z | 2021-01-09T22:18:22Z | 2021-01-09T22:22:25Z |
ENH: making value_counts stable/keeping original ordering | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index ff11ebc022ffb..2234b870ac9c0 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -356,6 +356,7 @@ Reshaping
- Bug in :func:`join` over :class:`MultiIndex` returned wrong result, when one of both indexes had only one level (:issue:`36909`)
- :meth:`merge_asof` raises ``ValueError`` instead of cryptic ``TypeError`` in case of non-numerical merge columns (:issue:`29130`)
- Bug in :meth:`DataFrame.join` not assigning values correctly when having :class:`MultiIndex` where at least one dimension is from dtype ``Categorical`` with non-alphabetically sorted categories (:issue:`38502`)
+- :meth:`Series.value_counts` returns keys in original order (:issue:`12679`, :issue:`11227`)
- Bug in :meth:`DataFrame.apply` would give incorrect results when used with a string argument and ``axis=1`` when the axis argument was not supported and now raises a ``ValueError`` instead (:issue:`39211`)
-
diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in
index 276f162545399..a3e72ed858392 100644
--- a/pandas/_libs/hashtable_class_helper.pxi.in
+++ b/pandas/_libs/hashtable_class_helper.pxi.in
@@ -19,13 +19,6 @@ cdef kh{{name}}_t to_kh{{name}}_t({{name}}_t val) nogil:
res.imag = val.imag
return res
-
-cdef {{name}}_t to_{{name}}(kh{{name}}_t val) nogil:
- cdef {{name}}_t res
- res.real = val.real
- res.imag = val.imag
- return res
-
{{endfor}}
diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in
index f8f541235dcb7..b4da5a3c7fb09 100644
--- a/pandas/_libs/hashtable_func_helper.pxi.in
+++ b/pandas/_libs/hashtable_func_helper.pxi.in
@@ -6,26 +6,26 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
{{py:
-# dtype, ttype, c_type, to_c_type, to_dtype
-dtypes = [('complex128', 'complex128', 'khcomplex128_t',
- 'to_khcomplex128_t', 'to_complex128'),
- ('complex64', 'complex64', 'khcomplex64_t',
- 'to_khcomplex64_t', 'to_complex64'),
- ('float64', 'float64', 'float64_t', '', ''),
- ('float32', 'float32', 'float32_t', '', ''),
- ('uint64', 'uint64', 'uint64_t', '', ''),
- ('uint32', 'uint32', 'uint32_t', '', ''),
- ('uint16', 'uint16', 'uint16_t', '', ''),
- ('uint8', 'uint8', 'uint8_t', '', ''),
- ('object', 'pymap', 'object', '', ''),
- ('int64', 'int64', 'int64_t', '', ''),
- ('int32', 'int32', 'int32_t', '', ''),
- ('int16', 'int16', 'int16_t', '', ''),
- ('int8', 'int8', 'int8_t', '', '')]
+# name, dtype, ttype, c_type, to_c_type
+dtypes = [('Complex128', 'complex128', 'complex128',
+ 'khcomplex128_t', 'to_khcomplex128_t'),
+ ('Complex64', 'complex64', 'complex64',
+ 'khcomplex64_t', 'to_khcomplex64_t'),
+ ('Float64', 'float64', 'float64', 'float64_t', ''),
+ ('Float32', 'float32', 'float32', 'float32_t', ''),
+ ('UInt64', 'uint64', 'uint64', 'uint64_t', ''),
+ ('UInt32', 'uint32', 'uint32', 'uint32_t', ''),
+ ('UInt16', 'uint16', 'uint16', 'uint16_t', ''),
+ ('UInt8', 'uint8', 'uint8', 'uint8_t', ''),
+ ('Object', 'object', 'pymap', 'object', ''),
+ ('Int64', 'int64', 'int64', 'int64_t', ''),
+ ('Int32', 'int32', 'int32', 'int32_t', ''),
+ ('Int16', 'int16', 'int16', 'int16_t', ''),
+ ('Int8', 'int8', 'int8', 'int8_t', '')]
}}
-{{for dtype, ttype, c_type, to_c_type, to_dtype in dtypes}}
+{{for name, dtype, ttype, c_type, to_c_type in dtypes}}
@cython.wraparound(False)
@@ -77,54 +77,77 @@ cdef build_count_table_{{dtype}}(const {{dtype}}_t[:] values,
@cython.wraparound(False)
@cython.boundscheck(False)
{{if dtype == 'object'}}
-cpdef value_count_{{dtype}}(ndarray[{{dtype}}] values, bint dropna):
+cpdef value_count_{{dtype}}(ndarray[{{dtype}}] values, bint dropna, navalue=np.NaN):
{{else}}
cpdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna):
{{endif}}
cdef:
Py_ssize_t i = 0
+ Py_ssize_t n = len(values)
+ size_t unique_key_index = 0
+ size_t unique_key_count = 0
kh_{{ttype}}_t *table
- {{if dtype != 'object'}}
- {{dtype}}_t[:] result_keys
- int64_t[:] result_counts
- {{endif}}
-
# Don't use Py_ssize_t, since table.n_buckets is unsigned
khiter_t k
+ bint is_null
+
+ {{c_type}} val
+
+ int ret = 0
+
+ # we track the order in which keys are first seen (GH39009),
+ # khash-map isn't insertion-ordered, thus:
+ # table maps key to index_of_appearence
+ # result_keys maps index_of_appearence to key
+ # result_counts maps index_of_appearence to number of elements
+ result_keys = {{name}}Vector()
+ result_counts = Int64Vector()
table = kh_init_{{ttype}}()
+
{{if dtype == 'object'}}
- build_count_table_{{dtype}}(values, table, 1)
+ kh_resize_{{ttype}}(table, n // 10)
+
+ for i in range(n):
+ val = values[i]
+ is_null = checknull(val)
+ if not is_null or not dropna:
+ # all nas become the same representative:
+ if is_null:
+ val = navalue
+ k = kh_get_{{ttype}}(table, <PyObject*>val)
+ if k != table.n_buckets:
+ unique_key_index = table.vals[k]
+ result_counts.data.data[unique_key_index] += 1
+ else:
+ k = kh_put_{{ttype}}(table, <PyObject*>val, &ret)
+ table.vals[k] = unique_key_count
+ result_keys.append(val)
+ result_counts.append(1)
+ unique_key_count+=1
{{else}}
- build_count_table_{{dtype}}(values, table, dropna)
- {{endif}}
+ kh_resize_{{ttype}}(table, n)
- result_keys = np.empty(table.n_occupied, '{{dtype}}')
- result_counts = np.zeros(table.n_occupied, dtype=np.int64)
+ for i in range(n):
+ val = {{to_c_type}}(values[i])
- {{if dtype == 'object'}}
- for k in range(table.n_buckets):
- if kh_exist_{{ttype}}(table, k):
- result_keys[i] = <{{dtype}}>table.keys[k]
- result_counts[i] = table.vals[k]
- i += 1
- {{else}}
- with nogil:
- for k in range(table.n_buckets):
- if kh_exist_{{ttype}}(table, k):
- result_keys[i] = {{to_dtype}}(table.keys[k])
- result_counts[i] = table.vals[k]
- i += 1
+ if not is_nan_{{c_type}}(val) or not dropna:
+ k = kh_get_{{ttype}}(table, val)
+ if k != table.n_buckets:
+ unique_key_index = table.vals[k]
+ result_counts.data.data[unique_key_index] += 1
+ else:
+ k = kh_put_{{ttype}}(table, val, &ret)
+ table.vals[k] = unique_key_count
+ result_keys.append(val)
+ result_counts.append(1)
+ unique_key_count+=1
{{endif}}
kh_destroy_{{ttype}}(table)
- {{if dtype == 'object'}}
- return result_keys, result_counts
- {{else}}
- return np.asarray(result_keys), np.asarray(result_counts)
- {{endif}}
+ return result_keys.to_array(), result_counts.to_array()
@cython.wraparound(False)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index ed7ae75117c5c..968b39088e684 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -866,11 +866,6 @@ def value_counts_arraylike(values, dropna: bool):
f = getattr(htable, f"value_count_{ndtype}")
keys, counts = f(values, dropna)
- mask = isna(values)
- if not dropna and mask.any() and not isna(keys).any():
- keys = np.insert(keys, 0, np.NaN)
- counts = np.insert(counts, 0, mask.sum())
-
keys = _reconstruct_data(keys, original.dtype, original)
return keys, counts
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 631f67ced77dd..09e2e80f45b3d 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1004,9 +1004,9 @@ def value_counts(
>>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
>>> index.value_counts()
3.0 2
+ 1.0 1
2.0 1
4.0 1
- 1.0 1
dtype: int64
With `normalize` set to `True`, returns the relative frequency by
@@ -1015,9 +1015,9 @@ def value_counts(
>>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
>>> s.value_counts(normalize=True)
3.0 0.4
+ 1.0 0.2
2.0 0.2
4.0 0.2
- 1.0 0.2
dtype: float64
**bins**
@@ -1039,10 +1039,10 @@ def value_counts(
>>> s.value_counts(dropna=False)
3.0 2
+ 1.0 1
2.0 1
- NaN 1
4.0 1
- 1.0 1
+ NaN 1
dtype: int64
"""
return value_counts(
diff --git a/pandas/tests/arrays/boolean/test_function.py b/pandas/tests/arrays/boolean/test_function.py
index 0f8743489b412..d90655b6e2820 100644
--- a/pandas/tests/arrays/boolean/test_function.py
+++ b/pandas/tests/arrays/boolean/test_function.py
@@ -77,18 +77,18 @@ def test_ufunc_reduce_raises(values):
def test_value_counts_na():
arr = pd.array([True, False, pd.NA], dtype="boolean")
result = arr.value_counts(dropna=False)
- expected = pd.Series([1, 1, 1], index=[False, True, pd.NA], dtype="Int64")
+ expected = pd.Series([1, 1, 1], index=[True, False, pd.NA], dtype="Int64")
tm.assert_series_equal(result, expected)
result = arr.value_counts(dropna=True)
- expected = pd.Series([1, 1], index=[False, True], dtype="Int64")
+ expected = pd.Series([1, 1], index=[True, False], dtype="Int64")
tm.assert_series_equal(result, expected)
def test_value_counts_with_normalize():
s = pd.Series([True, False, pd.NA], dtype="boolean")
result = s.value_counts(normalize=True)
- expected = pd.Series([1, 1], index=[False, True], dtype="Float64") / 2
+ expected = pd.Series([1, 1], index=[True, False], dtype="Float64") / 2
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index 5365929213503..d14de990d8268 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -497,7 +497,7 @@ def test_value_counts_na(dtype, request):
arr = pd.array(["a", "b", "a", pd.NA], dtype=dtype)
result = arr.value_counts(dropna=False)
- expected = pd.Series([2, 1, 1], index=["a", pd.NA, "b"], dtype="Int64")
+ expected = pd.Series([2, 1, 1], index=["a", "b", pd.NA], dtype="Int64")
tm.assert_series_equal(result, expected)
result = arr.value_counts(dropna=True)
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index ea44e5d477fc6..587d3c466c631 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -288,7 +288,7 @@ def test_value_counts_preserves_tz(self):
arr[-2] = pd.NaT
result = arr.value_counts()
- expected = pd.Series([1, 4, 2], index=[pd.NaT, dti[0], dti[1]])
+ expected = pd.Series([4, 2, 1], index=[dti[0], dti[1], pd.NaT])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("method", ["pad", "backfill"])
diff --git a/pandas/tests/frame/methods/test_describe.py b/pandas/tests/frame/methods/test_describe.py
index ed1c3fcce378c..15bafb7a835ba 100644
--- a/pandas/tests/frame/methods/test_describe.py
+++ b/pandas/tests/frame/methods/test_describe.py
@@ -371,7 +371,7 @@ def test_describe_does_not_raise_error_for_dictlike_elements(self):
# GH#32409
df = DataFrame([{"test": {"a": "1"}}, {"test": {"a": "2"}}])
expected = DataFrame(
- {"test": [2, 2, {"a": "2"}, 1]}, index=["count", "unique", "top", "freq"]
+ {"test": [2, 2, {"a": "1"}, 1]}, index=["count", "unique", "top", "freq"]
)
result = df.describe()
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/libs/test_hashtable.py b/pandas/tests/libs/test_hashtable.py
index 4f650807afd30..5bf652c206a5f 100644
--- a/pandas/tests/libs/test_hashtable.py
+++ b/pandas/tests/libs/test_hashtable.py
@@ -272,6 +272,15 @@ def test_value_count(self, dtype, type_suffix, writable):
tm.assert_numpy_array_equal(np.sort(keys), expected)
assert np.all(counts == 5)
+ def test_value_count_stable(self, dtype, type_suffix, writable):
+ # GH12679
+ value_count = get_ht_function("value_count", type_suffix)
+ values = np.array([2, 1, 5, 22, 3, -1, 8]).astype(dtype)
+ values.flags.writeable = writable
+ keys, counts = value_count(values, False)
+ tm.assert_numpy_array_equal(keys, values)
+ assert np.all(counts == 1)
+
def test_duplicated_first(self, dtype, type_suffix, writable):
N = 100
duplicated = get_ht_function("duplicated", type_suffix)
diff --git a/pandas/tests/series/methods/test_value_counts.py b/pandas/tests/series/methods/test_value_counts.py
index f22b1be672190..505b879660ff1 100644
--- a/pandas/tests/series/methods/test_value_counts.py
+++ b/pandas/tests/series/methods/test_value_counts.py
@@ -185,7 +185,7 @@ def test_value_counts_categorical_with_nan(self):
(
Series([False, True, True, pd.NA]),
False,
- Series([2, 1, 1], index=[True, pd.NA, False]),
+ Series([2, 1, 1], index=[True, False, pd.NA]),
),
(
Series([False, True, True, pd.NA]),
@@ -195,7 +195,7 @@ def test_value_counts_categorical_with_nan(self):
(
Series(range(3), index=[True, False, np.nan]).index,
False,
- Series([1, 1, 1], index=[pd.NA, False, True]),
+ Series([1, 1, 1], index=[True, False, np.nan]),
),
],
)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index fb982c02acd99..88757b96085aa 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -6,7 +6,7 @@
import pytest
from pandas._libs import algos as libalgos, hashtable as ht
-from pandas.compat import IS64, np_array_datetime64_compat
+from pandas.compat import np_array_datetime64_compat
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import (
@@ -1272,12 +1272,10 @@ def test_value_counts_uint64(self):
tm.assert_series_equal(result, expected)
arr = np.array([-1, 2 ** 63], dtype=object)
- expected = Series([1, 1], index=[2 ** 63, -1])
+ expected = Series([1, 1], index=[-1, 2 ** 63])
result = algos.value_counts(arr)
- # 32-bit linux has a different ordering
- if IS64:
- tm.assert_series_equal(result, expected)
+ tm.assert_series_equal(result, expected)
class TestDuplicated:
| closes #12679
closes #11227
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
The order of the returned keys for `value_counts` aren't arbitrary (i.e. depending on the used hash function) but are original ordering (when sorted this applies for the keys with the same number of values).
| https://api.github.com/repos/pandas-dev/pandas/pulls/39009 | 2021-01-06T21:07:00Z | 2021-01-22T21:42:34Z | 2021-01-22T21:42:34Z | 2021-01-27T19:05:04Z |
CLN: inspect_excel_format | diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index b0ec8a1082a0e..8911696230c03 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -2,10 +2,10 @@
import datetime
from distutils.version import LooseVersion
import inspect
-from io import BufferedIOBase, BytesIO, RawIOBase
+from io import BytesIO
import os
from textwrap import fill
-from typing import IO, Any, Dict, Mapping, Optional, Union, cast
+from typing import Any, Dict, Mapping, Optional, Union, cast
import warnings
import zipfile
@@ -906,24 +906,18 @@ def close(self):
@doc(storage_options=_shared_docs["storage_options"])
def inspect_excel_format(
- path: Optional[str] = None,
- content: Union[None, BufferedIOBase, RawIOBase, bytes] = None,
+ content_or_path: FilePathOrBuffer,
storage_options: StorageOptions = None,
) -> str:
"""
Inspect the path or content of an excel file and get its format.
- At least one of path or content must be not None. If both are not None,
- content will take precedence.
-
Adopted from xlrd: https://github.com/python-excel/xlrd.
Parameters
----------
- path : str, optional
- Path to file to inspect. May be a URL.
- content : file-like object, optional
- Content of file to inspect.
+ content_or_path : str or file-like object
+ Path to file or content of file to inspect. May be a URL.
{storage_options}
Returns
@@ -938,12 +932,8 @@ def inspect_excel_format(
BadZipFile
If resulting stream does not have an XLS signature and is not a valid zipfile.
"""
- content_or_path: Union[None, str, BufferedIOBase, RawIOBase, IO[bytes]]
- if isinstance(content, bytes):
- content_or_path = BytesIO(content)
- else:
- content_or_path = content or path
- assert content_or_path is not None
+ if isinstance(content_or_path, bytes):
+ content_or_path = BytesIO(content_or_path)
with get_handle(
content_or_path, "rb", storage_options=storage_options, is_text=False
@@ -1069,7 +1059,7 @@ def __init__(
ext = "xls"
else:
ext = inspect_excel_format(
- content=path_or_buffer, storage_options=storage_options
+ content_or_path=path_or_buffer, storage_options=storage_options
)
if engine is None:
| - [x] closes #38823
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
cc @rhshadrach | https://api.github.com/repos/pandas-dev/pandas/pulls/39008 | 2021-01-06T21:05:55Z | 2021-01-08T21:39:23Z | 2021-01-08T21:39:23Z | 2021-01-08T22:11:48Z |
DOC: Clarify index_col behavior for read_csv | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 0c09ea3e0e2fc..8c7e01dd999d3 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -112,8 +112,9 @@ index_col : int, str, sequence of int / str, or False, default ``None``
The default value of ``None`` instructs pandas to guess. If the number of
fields in the column header row is equal to the number of fields in the body
- of the data file, then a default index is used. If it is one larger, then
- the first field is used as an index.
+ of the data file, then a default index is used. If it is larger, then
+ the first columns are used as index so that the remaining number of fields in
+ the body are equal to the number of fields in the header.
usecols : list-like or callable, default ``None``
Return a subset of the columns. If list-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
| - [x] closes #38830
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
| https://api.github.com/repos/pandas-dev/pandas/pulls/39006 | 2021-01-06T19:53:22Z | 2021-01-06T22:56:26Z | 2021-01-06T22:56:26Z | 2021-01-06T23:01:11Z |
TST: add note about scope of base extension tests to all files | diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py
index 0fde1e8a2fdb8..281bbc21e3106 100644
--- a/pandas/tests/extension/test_datetime.py
+++ b/pandas/tests/extension/test_datetime.py
@@ -1,3 +1,18 @@
+"""
+This file contains a minimal set of tests for compliance with the extension
+array interface test suite, and should contain no other tests.
+The test suite for the full functionality of the array is located in
+`pandas/tests/arrays/`.
+
+The tests in this file are inherited from the BaseExtensionTests, and only
+minimal tweaks should be applied to get the tests passing (by overwriting a
+parent method).
+
+Additional tests should either be added to one of the BaseExtensionTests
+classes (if they are relevant for the extension interface for all dtypes), or
+be added to the array-specific tests in `pandas/tests/arrays/`.
+
+"""
import numpy as np
import pytest
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index 29790d14f93cc..1f0181eec8830 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -1,3 +1,18 @@
+"""
+This file contains a minimal set of tests for compliance with the extension
+array interface test suite, and should contain no other tests.
+The test suite for the full functionality of the array is located in
+`pandas/tests/arrays/`.
+
+The tests in this file are inherited from the BaseExtensionTests, and only
+minimal tweaks should be applied to get the tests passing (by overwriting a
+parent method).
+
+Additional tests should either be added to one of the BaseExtensionTests
+classes (if they are relevant for the extension interface for all dtypes), or
+be added to the array-specific tests in `pandas/tests/arrays/`.
+
+"""
import numpy as np
import pytest
diff --git a/pandas/tests/extension/test_period.py b/pandas/tests/extension/test_period.py
index 817881e00fa99..30dd6193846a4 100644
--- a/pandas/tests/extension/test_period.py
+++ b/pandas/tests/extension/test_period.py
@@ -1,3 +1,18 @@
+"""
+This file contains a minimal set of tests for compliance with the extension
+array interface test suite, and should contain no other tests.
+The test suite for the full functionality of the array is located in
+`pandas/tests/arrays/`.
+
+The tests in this file are inherited from the BaseExtensionTests, and only
+minimal tweaks should be applied to get the tests passing (by overwriting a
+parent method).
+
+Additional tests should either be added to one of the BaseExtensionTests
+classes (if they are relevant for the extension interface for all dtypes), or
+be added to the array-specific tests in `pandas/tests/arrays/`.
+
+"""
import numpy as np
import pytest
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index ffd56b9c23bc8..86f9080571459 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -1,3 +1,18 @@
+"""
+This file contains a minimal set of tests for compliance with the extension
+array interface test suite, and should contain no other tests.
+The test suite for the full functionality of the array is located in
+`pandas/tests/arrays/`.
+
+The tests in this file are inherited from the BaseExtensionTests, and only
+minimal tweaks should be applied to get the tests passing (by overwriting a
+parent method).
+
+Additional tests should either be added to one of the BaseExtensionTests
+classes (if they are relevant for the extension interface for all dtypes), or
+be added to the array-specific tests in `pandas/tests/arrays/`.
+
+"""
import numpy as np
import pytest
diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py
index d49c4c5cf4889..d0a3ef17afdbc 100644
--- a/pandas/tests/extension/test_string.py
+++ b/pandas/tests/extension/test_string.py
@@ -1,3 +1,18 @@
+"""
+This file contains a minimal set of tests for compliance with the extension
+array interface test suite, and should contain no other tests.
+The test suite for the full functionality of the array is located in
+`pandas/tests/arrays/`.
+
+The tests in this file are inherited from the BaseExtensionTests, and only
+minimal tweaks should be applied to get the tests passing (by overwriting a
+parent method).
+
+Additional tests should either be added to one of the BaseExtensionTests
+classes (if they are relevant for the extension interface for all dtypes), or
+be added to the array-specific tests in `pandas/tests/arrays/`.
+
+"""
import string
import numpy as np
| We already had this note in about half of the files in this directory, copied it to include in the other files as well. | https://api.github.com/repos/pandas-dev/pandas/pulls/39003 | 2021-01-06T15:47:35Z | 2021-01-06T18:34:18Z | 2021-01-06T18:34:18Z | 2021-01-12T08:05:04Z |
Remove Scatter and Hexbin from Series plot documentation | diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index e891017b37bc1..795239ab78c6e 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -630,8 +630,8 @@ class PlotAccessor(PandasObject):
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
- - 'scatter' : scatter plot
- - 'hexbin' : hexbin plot.
+ - 'scatter' : scatter plot (DataFrame only)
+ - 'hexbin' : hexbin plot (DataFrame only)
ax : matplotlib axes object, default None
An axes of the current figure.
subplots : bool, default False
| - [x] closes #38976
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] Remove scatter and hexbin entry from https://github.com/pandas-dev/pandas/blob/v1.2.0/pandas/plotting/_core.py#L603-L1708
| https://api.github.com/repos/pandas-dev/pandas/pulls/39000 | 2021-01-06T11:56:45Z | 2021-01-06T18:36:17Z | 2021-01-06T18:36:16Z | 2021-01-06T18:36:21Z |
DOC: Update contributing.rst | diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index b810c71e3daa6..90ecee8cf9312 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -698,6 +698,12 @@ to run its checks with::
without needing to have done ``pre-commit install`` beforehand.
+If you want to run checks on all recently commited files on upstream/master you can use::
+
+ pre-commit run --from-ref=upstream/master --to-ref=HEAD --all-files
+
+without needing to have done ``pre-commit install`` beforehand.
+
.. note::
If you have conflicting installations of ``virtualenv``, then you may get an
| - [x] closes #38938
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38998 | 2021-01-06T10:52:38Z | 2021-01-06T15:02:41Z | 2021-01-06T15:02:41Z | 2021-01-10T11:03:10Z |
REGR: errors='replace' when encoding/errors are not specified | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index 5695c817b5a3a..baeca87b8c4f8 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -24,6 +24,7 @@ Fixed regressions
- Fixed regression in :func:`read_excel` with non-rawbyte file handles (:issue:`38788`)
- Bug in :meth:`read_csv` with ``float_precision="high"`` caused segfault or wrong parsing of long exponent strings. This resulted in a regression in some cases as the default for ``float_precision`` was changed in pandas 1.2.0 (:issue:`38753`)
- Fixed regression in :meth:`Rolling.skew` and :meth:`Rolling.kurt` modifying the object inplace (:issue:`38908`)
+- Fixed regression in :meth:`read_csv` and other read functions were the encoding error policy (``errors``) did not default to ``"replace"`` when no encoding was specified (:issue:`38989`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 642684ca61480..8f04724773a8a 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -553,8 +553,7 @@ def get_handle(
Returns the dataclass IOHandles
"""
# Windows does not default to utf-8. Set to utf-8 for a consistent behavior
- if encoding is None:
- encoding = "utf-8"
+ encoding_passed, encoding = encoding, encoding or "utf-8"
# read_csv does not know whether the buffer is opened in binary/text mode
if _is_binary_mode(path_or_buf, mode) and "b" not in mode:
@@ -641,6 +640,9 @@ def get_handle(
# Check whether the filename is to be opened in binary mode.
# Binary mode does not support 'encoding' and 'newline'.
if ioargs.encoding and "b" not in ioargs.mode:
+ if errors is None and encoding_passed is None:
+ # ignore errors when no encoding is specified
+ errors = "replace"
# Encoding
handle = open(
handle,
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index d445bece593d1..725c14f410357 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -419,3 +419,11 @@ def test_is_fsspec_url():
assert not icom.is_fsspec_url("random:pandas/somethingelse.com")
assert not icom.is_fsspec_url("/local/path")
assert not icom.is_fsspec_url("relative/local/path")
+
+
+def test_default_errors():
+ # GH 38989
+ with tm.ensure_clean() as path:
+ file = Path(path)
+ file.write_bytes(b"\xe4\na\n1")
+ tm.assert_frame_equal(pd.read_csv(file, skiprows=[0]), pd.DataFrame({"a": [1]}))
| - [x] closes #38989
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
Should 1.3 use `errors='replace'` when no `encoding/errors` are specified or use `errors=None` (strict)? | https://api.github.com/repos/pandas-dev/pandas/pulls/38997 | 2021-01-06T06:02:23Z | 2021-01-07T18:48:20Z | 2021-01-07T18:48:20Z | 2021-01-07T19:07:02Z |
ENH: Add table-wise numba rolling to other agg funcions | diff --git a/ci/deps/azure-37-slow.yaml b/ci/deps/azure-37-slow.yaml
index 05b33fa351ac9..5d097e397992c 100644
--- a/ci/deps/azure-37-slow.yaml
+++ b/ci/deps/azure-37-slow.yaml
@@ -36,3 +36,4 @@ dependencies:
- xlwt
- moto
- flask
+ - numba
diff --git a/ci/deps/azure-38-slow.yaml b/ci/deps/azure-38-slow.yaml
index fd40f40294b7f..0a4107917f01a 100644
--- a/ci/deps/azure-38-slow.yaml
+++ b/ci/deps/azure-38-slow.yaml
@@ -34,3 +34,4 @@ dependencies:
- xlwt
- moto
- flask
+ - numba
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 886469837d184..9e557a0020f1e 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -37,7 +37,7 @@ For example:
:class:`Rolling` and :class:`Expanding` now support a ``method`` argument with a
``'table'`` option that performs the windowing operation over an entire :class:`DataFrame`.
-See ref:`window.overview` for performance and functional benefits. (:issue:`15095`)
+See ref:`window.overview` for performance and functional benefits. (:issue:`15095`, :issue:`38995`)
.. _whatsnew_130.enhancements.other:
diff --git a/pandas/core/window/numba_.py b/pandas/core/window/numba_.py
index 46b47b7e988c4..aa69d4fa675cd 100644
--- a/pandas/core/window/numba_.py
+++ b/pandas/core/window/numba_.py
@@ -1,3 +1,4 @@
+import functools
from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
@@ -220,3 +221,21 @@ def roll_table(
return result
return roll_table
+
+
+# This function will no longer be needed once numba supports
+# axis for all np.nan* agg functions
+# https://github.com/numba/numba/issues/1269
+@functools.lru_cache(maxsize=None)
+def generate_manual_numpy_nan_agg_with_axis(nan_func):
+ numba = import_optional_dependency("numba")
+
+ @numba.jit(nopython=True, nogil=True, parallel=True)
+ def nan_agg_with_axis(table):
+ result = np.empty(table.shape[1])
+ for i in numba.prange(table.shape[1]):
+ partition = table[:, i]
+ result[i] = nan_func(partition)
+ return result
+
+ return nan_agg_with_axis
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index a4612a4c8ed5d..393c517a63660 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -65,6 +65,7 @@
VariableWindowIndexer,
)
from pandas.core.window.numba_ import (
+ generate_manual_numpy_nan_agg_with_axis,
generate_numba_apply_func,
generate_numba_table_func,
)
@@ -1378,16 +1379,15 @@ def sum(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_window_func("sum", args, kwargs)
if maybe_use_numba(engine):
if self.method == "table":
- raise NotImplementedError("method='table' is not supported.")
- # Once numba supports np.nansum with axis, args will be relevant.
- # https://github.com/numba/numba/issues/6610
- args = () if self.method == "single" else (0,)
+ func = generate_manual_numpy_nan_agg_with_axis(np.nansum)
+ else:
+ func = np.nansum
+
return self.apply(
- np.nansum,
+ func,
raw=True,
engine=engine,
engine_kwargs=engine_kwargs,
- args=args,
)
window_func = window_aggregations.roll_sum
return self._apply(window_func, name="sum", **kwargs)
@@ -1424,16 +1424,15 @@ def max(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_window_func("max", args, kwargs)
if maybe_use_numba(engine):
if self.method == "table":
- raise NotImplementedError("method='table' is not supported.")
- # Once numba supports np.nanmax with axis, args will be relevant.
- # https://github.com/numba/numba/issues/6610
- args = () if self.method == "single" else (0,)
+ func = generate_manual_numpy_nan_agg_with_axis(np.nanmax)
+ else:
+ func = np.nanmax
+
return self.apply(
- np.nanmax,
+ func,
raw=True,
engine=engine,
engine_kwargs=engine_kwargs,
- args=args,
)
window_func = window_aggregations.roll_max
return self._apply(window_func, name="max", **kwargs)
@@ -1496,16 +1495,15 @@ def min(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_window_func("min", args, kwargs)
if maybe_use_numba(engine):
if self.method == "table":
- raise NotImplementedError("method='table' is not supported.")
- # Once numba supports np.nanmin with axis, args will be relevant.
- # https://github.com/numba/numba/issues/6610
- args = () if self.method == "single" else (0,)
+ func = generate_manual_numpy_nan_agg_with_axis(np.nanmin)
+ else:
+ func = np.nanmin
+
return self.apply(
- np.nanmin,
+ func,
raw=True,
engine=engine,
engine_kwargs=engine_kwargs,
- args=args,
)
window_func = window_aggregations.roll_min
return self._apply(window_func, name="min", **kwargs)
@@ -1514,16 +1512,15 @@ def mean(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_window_func("mean", args, kwargs)
if maybe_use_numba(engine):
if self.method == "table":
- raise NotImplementedError("method='table' is not supported.")
- # Once numba supports np.nanmean with axis, args will be relevant.
- # https://github.com/numba/numba/issues/6610
- args = () if self.method == "single" else (0,)
+ func = generate_manual_numpy_nan_agg_with_axis(np.nanmean)
+ else:
+ func = np.nanmean
+
return self.apply(
- np.nanmean,
+ func,
raw=True,
engine=engine,
engine_kwargs=engine_kwargs,
- args=args,
)
window_func = window_aggregations.roll_mean
return self._apply(window_func, name="mean", **kwargs)
@@ -1584,16 +1581,15 @@ def mean(self, *args, engine=None, engine_kwargs=None, **kwargs):
def median(self, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
if self.method == "table":
- raise NotImplementedError("method='table' is not supported.")
- # Once numba supports np.nanmedian with axis, args will be relevant.
- # https://github.com/numba/numba/issues/6610
- args = () if self.method == "single" else (0,)
+ func = generate_manual_numpy_nan_agg_with_axis(np.nanmedian)
+ else:
+ func = np.nanmedian
+
return self.apply(
- np.nanmedian,
+ func,
raw=True,
engine=engine,
engine_kwargs=engine_kwargs,
- args=args,
)
window_func = window_aggregations.roll_median_c
return self._apply(window_func, name="median", **kwargs)
diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py
index 9d9c216801d73..173e39ef42908 100644
--- a/pandas/tests/window/test_numba.py
+++ b/pandas/tests/window/test_numba.py
@@ -163,6 +163,7 @@ def test_invalid_kwargs_nopython():
@td.skip_if_no("numba", "0.46.0")
+@pytest.mark.slow
@pytest.mark.filterwarnings("ignore:\\nThe keyword argument")
# Filter warnings when parallel=True and the function can't be parallelized by Numba
class TestTableMethod:
@@ -177,9 +178,6 @@ def f(x):
f, engine="numba", raw=True
)
- @pytest.mark.xfail(
- raises=NotImplementedError, reason="method='table' is not supported."
- )
def test_table_method_rolling_methods(
self, axis, nogil, parallel, nopython, arithmetic_numba_supported_operators
):
@@ -247,9 +245,6 @@ def f(x):
)
tm.assert_frame_equal(result, expected)
- @pytest.mark.xfail(
- raises=NotImplementedError, reason="method='table' is not supported."
- )
def test_table_method_expanding_methods(
self, axis, nogil, parallel, nopython, arithmetic_numba_supported_operators
):
| - [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
Timings for a wide table
```
import pandas as pd
import numpy as np
df = pd.DataFrame(np.random.rand(10, 10**5))
roll_single = df.rolling(2, method="single")
roll_table = df.rolling(2, method="table")
%timeit roll_single.mean()
4.92 s ± 463 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
roll_single.mean(engine="numba", engine_kwargs={"nopython": True, "nogil": True, "parallel": True})
%timeit roll_single.mean(engine="numba", engine_kwargs={"nopython": True, "nogil": True, "parallel": True})
5.72 s ± 430 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
roll_table.mean(engine="numba", engine_kwargs={"nopython": True, "nogil": True, "parallel": True})
%timeit roll_table.mean(engine="numba", engine_kwargs={"nopython": True, "nogil": True, "parallel": True})
10.3 ms ± 1.23 ms per loop (mean ± std. dev. of 7 runs, 100 loops each)
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/38995 | 2021-01-06T05:44:52Z | 2021-01-07T21:24:06Z | 2021-01-07T21:24:06Z | 2021-05-26T15:54:22Z |
DOC: elaborate on copies vs in place operations in comparison docs | diff --git a/doc/source/getting_started/comparison/comparison_with_sas.rst b/doc/source/getting_started/comparison/comparison_with_sas.rst
index 2b316cccb7fc9..54b45dc20db20 100644
--- a/doc/source/getting_started/comparison/comparison_with_sas.rst
+++ b/doc/source/getting_started/comparison/comparison_with_sas.rst
@@ -62,6 +62,12 @@ see the :ref:`indexing documentation<indexing>` for much more on how to use an
``Index`` effectively.
+Copies vs. in place operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. include:: includes/copies.rst
+
+
Data input / output
-------------------
diff --git a/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst b/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst
index e9d687bc07999..c92d2a660d753 100644
--- a/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst
+++ b/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst
@@ -65,6 +65,13 @@ particular row don't change.
See the :ref:`indexing documentation<indexing>` for much more on how to use an ``Index``
effectively.
+
+Copies vs. in place operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. include:: includes/copies.rst
+
+
Data input / output
-------------------
diff --git a/doc/source/getting_started/comparison/comparison_with_sql.rst b/doc/source/getting_started/comparison/comparison_with_sql.rst
index 890f0cbe50424..fcfa03a8bce5f 100644
--- a/doc/source/getting_started/comparison/comparison_with_sql.rst
+++ b/doc/source/getting_started/comparison/comparison_with_sql.rst
@@ -23,6 +23,13 @@ structure.
tips = pd.read_csv(url)
tips
+
+Copies vs. in place operations
+------------------------------
+
+.. include:: includes/copies.rst
+
+
SELECT
------
In SQL, selection is done using a comma-separated list of columns you'd like to select (or a ``*``
diff --git a/doc/source/getting_started/comparison/comparison_with_stata.rst b/doc/source/getting_started/comparison/comparison_with_stata.rst
index 43cb775b5461d..94c45adcccc82 100644
--- a/doc/source/getting_started/comparison/comparison_with_stata.rst
+++ b/doc/source/getting_started/comparison/comparison_with_stata.rst
@@ -61,6 +61,12 @@ see the :ref:`indexing documentation<indexing>` for much more on how to use an
``Index`` effectively.
+Copies vs. in place operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. include:: includes/copies.rst
+
+
Data input / output
-------------------
diff --git a/doc/source/getting_started/comparison/includes/column_selection.rst b/doc/source/getting_started/comparison/includes/column_selection.rst
index b925af1294f54..071645c9718cb 100644
--- a/doc/source/getting_started/comparison/includes/column_selection.rst
+++ b/doc/source/getting_started/comparison/includes/column_selection.rst
@@ -1,5 +1,4 @@
-The same operations are expressed in pandas below. Note that these operations do not happen in
-place. To make these changes persist, assign the operation back to a variable.
+The same operations are expressed in pandas below.
Keep certain columns
''''''''''''''''''''
diff --git a/doc/source/getting_started/comparison/includes/copies.rst b/doc/source/getting_started/comparison/includes/copies.rst
new file mode 100644
index 0000000000000..08ccd47624932
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/copies.rst
@@ -0,0 +1,23 @@
+Most pandas operations return copies of the ``Series``/``DataFrame``. To make the changes "stick",
+you'll need to either assign to a new variable:
+
+ .. code-block:: python
+
+ sorted_df = df.sort_values("col1")
+
+
+or overwrite the original one:
+
+ .. code-block:: python
+
+ df = df.sort_values("col1")
+
+.. note::
+
+ You will see an ``inplace=True`` keyword argument available for some methods:
+
+ .. code-block:: python
+
+ df.sort_values("col1", inplace=True)
+
+ Its use is discouraged. :ref:`More information. <indexing.view_versus_copy>`
| <img width="782" alt="Screen Shot 2021-01-06 at 12 15 05 AM" src="https://user-images.githubusercontent.com/86842/103731907-437e7280-4fb4-11eb-9e6d-e702f117656c.png">
- [ ] ~~closes #xxxx~~
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] ~~whatsnew entry~~
| https://api.github.com/repos/pandas-dev/pandas/pulls/38994 | 2021-01-06T05:20:24Z | 2021-01-08T14:14:30Z | 2021-01-08T14:14:29Z | 2021-01-10T01:13:26Z |
DOC: add more sections to spreadsheet comparison | diff --git a/doc/source/_static/spreadsheets/conditional.png b/doc/source/_static/spreadsheets/conditional.png
new file mode 100644
index 0000000000000..d518ff19dc760
Binary files /dev/null and b/doc/source/_static/spreadsheets/conditional.png differ
diff --git a/doc/source/_static/spreadsheets/filter.png b/doc/source/_static/spreadsheets/filter.png
new file mode 100644
index 0000000000000..b4c929793ca44
Binary files /dev/null and b/doc/source/_static/spreadsheets/filter.png differ
diff --git a/doc/source/_static/spreadsheets/find.png b/doc/source/_static/spreadsheets/find.png
new file mode 100644
index 0000000000000..223b2e6fc762f
Binary files /dev/null and b/doc/source/_static/spreadsheets/find.png differ
diff --git a/doc/source/_static/logo_excel.svg b/doc/source/_static/spreadsheets/logo_excel.svg
similarity index 100%
rename from doc/source/_static/logo_excel.svg
rename to doc/source/_static/spreadsheets/logo_excel.svg
diff --git a/doc/source/_static/excel_pivot.png b/doc/source/_static/spreadsheets/pivot.png
similarity index 100%
rename from doc/source/_static/excel_pivot.png
rename to doc/source/_static/spreadsheets/pivot.png
diff --git a/doc/source/_static/spreadsheets/sort.png b/doc/source/_static/spreadsheets/sort.png
new file mode 100644
index 0000000000000..253f2f3bfb9ba
Binary files /dev/null and b/doc/source/_static/spreadsheets/sort.png differ
diff --git a/doc/source/_static/spreadsheets/vlookup.png b/doc/source/_static/spreadsheets/vlookup.png
new file mode 100644
index 0000000000000..e96da01da1eeb
Binary files /dev/null and b/doc/source/_static/spreadsheets/vlookup.png differ
diff --git a/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst b/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst
index 7b779b02e20f8..e9d687bc07999 100644
--- a/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst
+++ b/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst
@@ -52,9 +52,12 @@ pandas, if no index is specified, a :class:`~pandas.RangeIndex` is used by defau
second row = 1, and so on), analogous to row headings/numbers in spreadsheets.
In pandas, indexes can be set to one (or multiple) unique values, which is like having a column that
-use use as the row identifier in a worksheet. Unlike spreadsheets, these ``Index`` values can actually be
-used to reference the rows. For example, in spreadsheets, you would reference the first row as ``A1:Z1``,
-while in pandas you could use ``populations.loc['Chicago']``.
+is used as the row identifier in a worksheet. Unlike most spreadsheets, these ``Index`` values can
+actually be used to reference the rows. (Note that `this can be done in Excel with structured
+references
+<https://support.microsoft.com/en-us/office/using-structured-references-with-excel-tables-f5ed2452-2337-4f71-bed3-c8ae6d2b276e>`_.)
+For example, in spreadsheets, you would reference the first row as ``A1:Z1``, while in pandas you
+could use ``populations.loc['Chicago']``.
Index values are also persistent, so if you re-order the rows in a ``DataFrame``, the label for a
particular row don't change.
@@ -62,11 +65,18 @@ particular row don't change.
See the :ref:`indexing documentation<indexing>` for much more on how to use an ``Index``
effectively.
-Commonly used spreadsheet functionalities
------------------------------------------
+Data input / output
+-------------------
-Importing data
-~~~~~~~~~~~~~~
+Constructing a DataFrame from values
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In a spreadsheet, `values can be typed directly into cells <https://support.microsoft.com/en-us/office/enter-data-manually-in-worksheet-cells-c798181d-d75a-41b1-92ad-6c0800f80038>`_.
+
+.. include:: includes/construct_dataframe.rst
+
+Reading external data
+~~~~~~~~~~~~~~~~~~~~~
Both `Excel <https://support.microsoft.com/en-us/office/import-data-from-external-data-sources-power-query-be4330b3-5356-486c-a168-b68e9e616f5a>`__
and :ref:`pandas <10min_tut_02_read_write>` can import data from various sources in various
@@ -96,6 +106,248 @@ In pandas, you pass the URL or local path of the CSV file to :func:`~pandas.read
tips = pd.read_csv(url)
tips
+Like `Excel's Text Import Wizard <https://support.microsoft.com/en-us/office/text-import-wizard-c5b02af6-fda1-4440-899f-f78bafe41857>`_,
+``read_csv`` can take a number of parameters to specify how the data should be parsed. For
+example, if the data was instead tab delimited, and did not have column names, the pandas command
+would be:
+
+.. code-block:: python
+
+ tips = pd.read_csv("tips.csv", sep="\t", header=None)
+
+ # alternatively, read_table is an alias to read_csv with tab delimiter
+ tips = pd.read_table("tips.csv", header=None)
+
+
+Limiting output
+~~~~~~~~~~~~~~~
+
+Spreadsheet programs will only show one screenful of data at a time and then allow you to scroll, so
+there isn't really a need to limit output. In pandas, you'll need to put a little more thought into
+controlling how your ``DataFrame``\s are displayed.
+
+.. include:: includes/limit.rst
+
+
+Exporting data
+~~~~~~~~~~~~~~
+
+By default, desktop spreadsheet software will save to its respective file format (``.xlsx``, ``.ods``, etc). You can, however, `save to other file formats <https://support.microsoft.com/en-us/office/save-a-workbook-in-another-file-format-6a16c862-4a36-48f9-a300-c2ca0065286e>`_.
+
+:ref:`pandas can create Excel files <io.excel_writer>`, :ref:`CSV <io.store_in_csv>`, or :ref:`a number of other formats <io>`.
+
+Data operations
+---------------
+
+Operations on columns
+~~~~~~~~~~~~~~~~~~~~~
+
+In spreadsheets, `formulas
+<https://support.microsoft.com/en-us/office/overview-of-formulas-in-excel-ecfdc708-9162-49e8-b993-c311f47ca173>`_
+are often created in individual cells and then `dragged
+<https://support.microsoft.com/en-us/office/copy-a-formula-by-dragging-the-fill-handle-in-excel-for-mac-dd928259-622b-473f-9a33-83aa1a63e218>`_
+into other cells to compute them for other columns. In pandas, you're able to do operations on whole
+columns directly.
+
+.. include:: includes/column_operations.rst
+
+Note that we aren't having to tell it to do that subtraction cell-by-cell — pandas handles that for
+us. See :ref:`how to create new columns derived from existing columns <10min_tut_05_columns>`.
+
+
+Filtering
+~~~~~~~~~
+
+`In Excel, filtering is done through a graphical menu. <https://support.microsoft.com/en-us/office/filter-data-in-a-range-or-table-01832226-31b5-4568-8806-38c37dcc180e>`_
+
+.. image:: ../../_static/spreadsheets/filter.png
+ :alt: Screenshot showing filtering of the total_bill column to values greater than 10
+ :align: center
+
+.. include:: includes/filtering.rst
+
+If/then logic
+~~~~~~~~~~~~~
+
+Let's say we want to make a ``bucket`` column with values of ``low`` and ``high``, based on whether
+the ``total_bill`` is less or more than $10.
+
+In spreadsheets, logical comparison can be done with `conditional formulas
+<https://support.microsoft.com/en-us/office/create-conditional-formulas-ca916c57-abd8-4b44-997c-c309b7307831>`_.
+We'd use a formula of ``=IF(A2 < 10, "low", "high")``, dragged to all cells in a new ``bucket``
+column.
+
+.. image:: ../../_static/spreadsheets/conditional.png
+ :alt: Screenshot showing the formula from above in a bucket column of the tips spreadsheet
+ :align: center
+
+.. include:: includes/if_then.rst
+
+Date functionality
+~~~~~~~~~~~~~~~~~~
+
+*This section will refer to "dates", but timestamps are handled similarly.*
+
+We can think of date functionality in two parts: parsing, and output. In spreadsheets, date values
+are generally parsed automatically, though there is a `DATEVALUE
+<https://support.microsoft.com/en-us/office/datevalue-function-df8b07d4-7761-4a93-bc33-b7471bbff252>`_
+function if you need it. In pandas, you need to explicitly convert plain text to datetime objects,
+either :ref:`while reading from a CSV <io.read_csv_table.datetime>` or :ref:`once in a DataFrame
+<10min_tut_09_timeseries.properties>`.
+
+Once parsed, spreadsheets display the dates in a default format, though `the format can be changed
+<https://support.microsoft.com/en-us/office/format-a-date-the-way-you-want-8e10019e-d5d8-47a1-ba95-db95123d273e>`_.
+In pandas, you'll generally want to keep dates as ``datetime`` objects while you're doing
+calculations with them. Outputting *parts* of dates (such as the year) is done through `date
+functions
+<https://support.microsoft.com/en-us/office/date-and-time-functions-reference-fd1b5961-c1ae-4677-be58-074152f97b81>`_
+in spreadsheets, and :ref:`datetime properties <10min_tut_09_timeseries.properties>` in pandas.
+
+Given ``date1`` and ``date2`` in columns ``A`` and ``B`` of a spreadsheet, you might have these
+formulas:
+
+.. list-table::
+ :header-rows: 1
+ :widths: auto
+
+ * - column
+ - formula
+ * - ``date1_year``
+ - ``=YEAR(A2)``
+ * - ``date2_month``
+ - ``=MONTH(B2)``
+ * - ``date1_next``
+ - ``=DATE(YEAR(A2),MONTH(A2)+1,1)``
+ * - ``months_between``
+ - ``=DATEDIF(A2,B2,"M")``
+
+The equivalent pandas operations are shown below.
+
+.. include:: includes/time_date.rst
+
+See :ref:`timeseries` for more details.
+
+
+Selection of columns
+~~~~~~~~~~~~~~~~~~~~
+
+In spreadsheets, you can select columns you want by:
+
+- `Hiding columns <https://support.microsoft.com/en-us/office/hide-or-show-rows-or-columns-659c2cad-802e-44ee-a614-dde8443579f8>`_
+- `Deleting columns <https://support.microsoft.com/en-us/office/insert-or-delete-rows-and-columns-6f40e6e4-85af-45e0-b39d-65dd504a3246>`_
+- `Referencing a range <https://support.microsoft.com/en-us/office/create-or-change-a-cell-reference-c7b8b95d-c594-4488-947e-c835903cebaa>`_ from one worksheet into another
+
+Since spreadsheet columns are typically `named in a header row
+<https://support.microsoft.com/en-us/office/turn-excel-table-headers-on-or-off-c91d1742-312c-4480-820f-cf4b534c8b3b>`_,
+renaming a column is simply a matter of changing the text in that first cell.
+
+.. include:: includes/column_selection.rst
+
+
+Sorting by values
+~~~~~~~~~~~~~~~~~
+
+Sorting in spreadsheets is accomplished via `the sort dialog <https://support.microsoft.com/en-us/office/sort-data-in-a-range-or-table-62d0b95d-2a90-4610-a6ae-2e545c4a4654>`_.
+
+.. image:: ../../_static/spreadsheets/sort.png
+ :alt: Screenshot of dialog from Excel showing sorting by the sex then total_bill columns
+ :align: center
+
+.. include:: includes/sorting.rst
+
+String processing
+-----------------
+
+Finding length of string
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+In spreadsheets, the number of characters in text can be found with the `LEN
+<https://support.microsoft.com/en-us/office/len-lenb-functions-29236f94-cedc-429d-affd-b5e33d2c67cb>`_
+function. This can be used with the `TRIM
+<https://support.microsoft.com/en-us/office/trim-function-410388fa-c5df-49c6-b16c-9e5630b479f9>`_
+function to remove extra whitespace.
+
+::
+
+ =LEN(TRIM(A2))
+
+.. include:: includes/length.rst
+
+Note this will still include multiple spaces within the string, so isn't 100% equivalent.
+
+
+Finding position of substring
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The `FIND
+<https://support.microsoft.com/en-us/office/find-findb-functions-c7912941-af2a-4bdf-a553-d0d89b0a0628>`_
+spreadsheet function returns the position of a substring, with the first character being ``1``.
+
+.. image:: ../../_static/spreadsheets/sort.png
+ :alt: Screenshot of FIND formula being used in Excel
+ :align: center
+
+.. include:: includes/find_substring.rst
+
+
+Extracting substring by position
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Spreadsheets have a `MID
+<https://support.microsoft.com/en-us/office/mid-midb-functions-d5f9e25c-d7d6-472e-b568-4ecb12433028>`_
+formula for extracting a substring from a given position. To get the first character::
+
+ =MID(A2,1,1)
+
+.. include:: includes/extract_substring.rst
+
+
+Extracting nth word
+~~~~~~~~~~~~~~~~~~~
+
+In Excel, you might use the `Text to Columns Wizard
+<https://support.microsoft.com/en-us/office/split-text-into-different-columns-with-the-convert-text-to-columns-wizard-30b14928-5550-41f5-97ca-7a3e9c363ed7>`_
+for splitting text and retrieving a specific column. (Note `it's possible to do so through a formula
+as well <https://exceljet.net/formula/extract-nth-word-from-text-string>`_.)
+
+.. include:: includes/nth_word.rst
+
+
+Changing case
+~~~~~~~~~~~~~
+
+Spreadsheets provide `UPPER, LOWER, and PROPER functions
+<https://support.microsoft.com/en-us/office/change-the-case-of-text-01481046-0fa7-4f3b-a693-496795a7a44d>`_
+for converting text to upper, lower, and title case, respectively.
+
+.. include:: includes/case.rst
+
+
+Merging
+-------
+
+.. include:: includes/merge_setup.rst
+
+In Excel, there are `merging of tables can be done through a VLOOKUP
+<https://support.microsoft.com/en-us/office/how-can-i-merge-two-or-more-tables-c80a9fce-c1ab-4425-bb96-497dd906d656>`_.
+
+.. image:: ../../_static/spreadsheets/vlookup.png
+ :alt: Screenshot showing a VLOOKUP formula between two tables in Excel, with some values being filled in and others with "#N/A"
+ :align: center
+
+.. include:: includes/merge.rst
+
+``merge`` has a number of advantages over ``VLOOKUP``:
+
+* The lookup value doesn't need to be the first column of the lookup table
+* If multiple rows are matched, there will be one row for each match, instead of just the first
+* It will include all columns from the lookup table, instead of just a single specified column
+* It supports :ref:`more complex join operations <merging.join>`
+
+
+Other considerations
+--------------------
+
Fill Handle
~~~~~~~~~~~
@@ -117,21 +369,6 @@ This can be achieved by creating a series and assigning it to the desired cells.
df
-Filters
-~~~~~~~
-
-Filters can be achieved by using slicing.
-
-The examples filter by 0 on column AAA, and also show how to filter by multiple
-values.
-
-.. ipython:: python
-
- df[df.AAA == 0]
-
- df[(df.AAA == 0) | (df.AAA == 2)]
-
-
Drop Duplicates
~~~~~~~~~~~~~~~
@@ -152,7 +389,6 @@ This is supported in pandas via :meth:`~DataFrame.drop_duplicates`.
df.drop_duplicates(["class", "student_count"])
-
Pivot Tables
~~~~~~~~~~~~
@@ -162,7 +398,8 @@ let's find the average gratuity by size of the party and sex of the server.
In Excel, we use the following configuration for the PivotTable:
-.. image:: ../../_static/excel_pivot.png
+.. image:: ../../_static/spreadsheets/pivot.png
+ :alt: Screenshot showing a PivotTable in Excel, using sex as the column, size as the rows, then average tip as the values
:align: center
The equivalent in pandas:
@@ -173,81 +410,34 @@ The equivalent in pandas:
tips, values="tip", index=["size"], columns=["sex"], aggfunc=np.average
)
-Formulas
-~~~~~~~~
-In spreadsheets, `formulas <https://support.microsoft.com/en-us/office/overview-of-formulas-in-excel-ecfdc708-9162-49e8-b993-c311f47ca173>`_
-are often created in individual cells and then `dragged <https://support.microsoft.com/en-us/office/copy-a-formula-by-dragging-the-fill-handle-in-excel-for-mac-dd928259-622b-473f-9a33-83aa1a63e218>`_
-into other cells to compute them for other columns. In pandas, you'll be doing more operations on
-full columns.
+Adding a row
+~~~~~~~~~~~~
-As an example, let's create a new column "girls_count" and try to compute the number of boys in
-each class.
+Assuming we are using a :class:`~pandas.RangeIndex` (numbered ``0``, ``1``, etc.), we can use :meth:`DataFrame.append` to add a row to the bottom of a ``DataFrame``.
.. ipython:: python
- df["girls_count"] = [21, 12, 21, 31, 23, 17]
- df
- df["boys_count"] = df["student_count"] - df["girls_count"]
df
+ new_row = {"class": "E", "student_count": 51, "all_pass": True}
+ df.append(new_row, ignore_index=True)
-Note that we aren't having to tell it to do that subtraction cell-by-cell — pandas handles that for
-us. See :ref:`how to create new columns derived from existing columns <10min_tut_05_columns>`.
-VLOOKUP
-~~~~~~~
-
-.. ipython:: python
+Find and Replace
+~~~~~~~~~~~~~~~~
- import random
-
- first_names = [
- "harry",
- "ron",
- "hermione",
- "rubius",
- "albus",
- "severus",
- "luna",
- ]
- keys = [1, 2, 3, 4, 5, 6, 7]
- df1 = pd.DataFrame({"keys": keys, "first_names": first_names})
- df1
-
- surnames = [
- "hadrid",
- "malfoy",
- "lovegood",
- "dumbledore",
- "grindelwald",
- "granger",
- "weasly",
- "riddle",
- "longbottom",
- "snape",
- ]
- keys = [random.randint(1, 7) for x in range(0, 10)]
- random_names = pd.DataFrame({"surnames": surnames, "keys": keys})
-
- random_names
-
- random_names.merge(df1, on="keys", how="left")
-
-Adding a row
-~~~~~~~~~~~~
-
-To appended a row, we can just assign values to an index using :meth:`~DataFrame.loc`.
-
-NOTE: If the index already exists, the values in that index will be over written.
+`Excel's Find dialog <https://support.microsoft.com/en-us/office/find-or-replace-text-and-numbers-on-a-worksheet-0e304ca5-ecef-4808-b90f-fdb42f892e90>`_
+takes you to cells that match, one by one. In pandas, this operation is generally done for an
+entire column or ``DataFrame`` at once through :ref:`conditional expressions <10min_tut_03_subset.rows_and_columns>`.
.. ipython:: python
- df1.loc[7] = [8, "tonks"]
- df1
+ tips
+ tips == "Sun"
+ tips["day"].str.contains("S")
+pandas' :meth:`~DataFrame.replace` is comparable to Excel's ``Replace All``.
-Search and Replace
-~~~~~~~~~~~~~~~~~~
+.. ipython:: python
-The ``replace`` method that comes associated with the ``DataFrame`` object can perform
-this function. Please see `pandas.DataFrame.replace <https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.replace.html>`__ for examples.
+ tips.replace("Thur", "Thu")
diff --git a/doc/source/getting_started/comparison/includes/column_operations.rst b/doc/source/getting_started/comparison/includes/column_operations.rst
index bc5db8e6b8038..b23b931ed2db1 100644
--- a/doc/source/getting_started/comparison/includes/column_operations.rst
+++ b/doc/source/getting_started/comparison/includes/column_operations.rst
@@ -1,4 +1,4 @@
-pandas provides similar vectorized operations by specifying the individual ``Series`` in the
+pandas provides vectorized operations by specifying the individual ``Series`` in the
``DataFrame``. New columns can be assigned in the same way. The :meth:`DataFrame.drop` method drops
a column from the ``DataFrame``.
diff --git a/doc/source/getting_started/index.rst b/doc/source/getting_started/index.rst
index de47bd5b72148..cd5dfb84fee31 100644
--- a/doc/source/getting_started/index.rst
+++ b/doc/source/getting_started/index.rst
@@ -626,7 +626,7 @@ the pandas-equivalent operations compared to software you already know:
</div>
<div class="col-lg-6 col-md-6 col-sm-6 col-xs-12 d-flex">
<div class="card text-center intro-card shadow">
- <img src="../_static/logo_excel.svg" class="card-img-top" alt="Excel logo" height="52">
+ <img src="../_static/spreadsheets/logo_excel.svg" class="card-img-top" alt="Excel logo" height="52">
<div class="card-body flex-fill">
<p class="card-text">Users of <a href="https://en.wikipedia.org/wiki/Microsoft_Excel">Excel</a>
or other spreadsheet programs will find that many of the concepts are transferrable to pandas.</p>
diff --git a/doc/source/getting_started/intro_tutorials/03_subset_data.rst b/doc/source/getting_started/intro_tutorials/03_subset_data.rst
index fe3eae6c42959..4106b0e064823 100644
--- a/doc/source/getting_started/intro_tutorials/03_subset_data.rst
+++ b/doc/source/getting_started/intro_tutorials/03_subset_data.rst
@@ -268,6 +268,8 @@ For more dedicated functions on missing values, see the user guide section about
</div>
+.. _10min_tut_03_subset.rows_and_columns:
+
How do I select specific rows and columns from a ``DataFrame``?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/getting_started/intro_tutorials/09_timeseries.rst b/doc/source/getting_started/intro_tutorials/09_timeseries.rst
index 598d3514baa15..b9cab0747196e 100644
--- a/doc/source/getting_started/intro_tutorials/09_timeseries.rst
+++ b/doc/source/getting_started/intro_tutorials/09_timeseries.rst
@@ -58,6 +58,8 @@ Westminster* in respectively Paris, Antwerp and London.
How to handle time series data with ease?
-----------------------------------------
+.. _10min_tut_09_timeseries.properties:
+
Using pandas datetime properties
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 9c9ad9538f488..1156ddd6da410 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -232,6 +232,8 @@ verbose : boolean, default ``False``
skip_blank_lines : boolean, default ``True``
If ``True``, skip over blank lines rather than interpreting as NaN values.
+.. _io.read_csv_table.datetime:
+
Datetime handling
+++++++++++++++++
| [Preview (link to PDF on Google Drive)](https://drive.google.com/file/d/1uUyTQyEAX3F6h4EJqKz7Mz4KehGBnAig/view?usp=sharing)
This pull request gets closer to full parity with [SAS](https://pandas.pydata.org/pandas-docs/stable/getting_started/comparison/comparison_with_sas.html)/[STATA](https://pandas.pydata.org/pandas-docs/stable/getting_started/comparison/comparison_with_stata.html) comparison pages by adding the Data Input/Output through Merging sections. It still needs Missing Data and GroupBy, but wanted to get this in while I was at a good stopping place. Each section was done in its own commit, if it's easier to review that way.
---
- [x] ~~closes~~ part of https://github.com/pandas-dev/pandas/issues/38990
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] ~~whatsnew entry~~ | https://api.github.com/repos/pandas-dev/pandas/pulls/38993 | 2021-01-06T03:27:45Z | 2021-01-06T14:53:11Z | 2021-01-06T14:53:10Z | 2021-01-06T14:53:19Z |
ENH: 2D support for MaskedArray | diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 82f9280870d59..bf78a3cdefbdd 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -637,7 +637,7 @@ def pad_inplace(numeric_object_t[:] values, uint8_t[:] mask, limit=None):
@cython.boundscheck(False)
@cython.wraparound(False)
-def pad_2d_inplace(numeric_object_t[:, :] values, const uint8_t[:, :] mask, limit=None):
+def pad_2d_inplace(numeric_object_t[:, :] values, uint8_t[:, :] mask, limit=None):
cdef:
Py_ssize_t i, j, N, K
numeric_object_t val
@@ -656,10 +656,11 @@ def pad_2d_inplace(numeric_object_t[:, :] values, const uint8_t[:, :] mask, limi
val = values[j, 0]
for i in range(N):
if mask[j, i]:
- if fill_count >= lim:
+ if fill_count >= lim or i == 0:
continue
fill_count += 1
values[j, i] = val
+ mask[j, i] = False
else:
fill_count = 0
val = values[j, i]
@@ -759,7 +760,7 @@ def backfill_inplace(numeric_object_t[:] values, uint8_t[:] mask, limit=None):
def backfill_2d_inplace(numeric_object_t[:, :] values,
- const uint8_t[:, :] mask,
+ uint8_t[:, :] mask,
limit=None):
pad_2d_inplace(values[:, ::-1], mask[:, ::-1], limit)
diff --git a/pandas/core/array_algos/masked_reductions.py b/pandas/core/array_algos/masked_reductions.py
index 01bb3d50c0da7..66a3152de1499 100644
--- a/pandas/core/array_algos/masked_reductions.py
+++ b/pandas/core/array_algos/masked_reductions.py
@@ -3,7 +3,10 @@
for missing values.
"""
-from typing import Callable
+from typing import (
+ Callable,
+ Optional,
+)
import numpy as np
@@ -19,6 +22,7 @@ def _sumprod(
*,
skipna: bool = True,
min_count: int = 0,
+ axis: Optional[int] = None,
):
"""
Sum or product for 1D masked array.
@@ -36,36 +40,55 @@ def _sumprod(
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
+ axis : int, optional, default None
"""
if not skipna:
- if mask.any() or check_below_min_count(values.shape, None, min_count):
+ if mask.any(axis=axis) or check_below_min_count(values.shape, None, min_count):
return libmissing.NA
else:
- return func(values)
+ return func(values, axis=axis)
else:
- if check_below_min_count(values.shape, mask, min_count):
+ if check_below_min_count(values.shape, mask, min_count) and (
+ axis is None or values.ndim == 1
+ ):
return libmissing.NA
- return func(values, where=~mask)
+
+ return func(values, where=~mask, axis=axis)
def sum(
- values: np.ndarray, mask: np.ndarray, *, skipna: bool = True, min_count: int = 0
+ values: np.ndarray,
+ mask: np.ndarray,
+ *,
+ skipna: bool = True,
+ min_count: int = 0,
+ axis: Optional[int] = None,
):
return _sumprod(
- np.sum, values=values, mask=mask, skipna=skipna, min_count=min_count
+ np.sum, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis
)
def prod(
- values: np.ndarray, mask: np.ndarray, *, skipna: bool = True, min_count: int = 0
+ values: np.ndarray,
+ mask: np.ndarray,
+ *,
+ skipna: bool = True,
+ min_count: int = 0,
+ axis: Optional[int] = None,
):
return _sumprod(
- np.prod, values=values, mask=mask, skipna=skipna, min_count=min_count
+ np.prod, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis
)
def _minmax(
- func: Callable, values: np.ndarray, mask: np.ndarray, *, skipna: bool = True
+ func: Callable,
+ values: np.ndarray,
+ mask: np.ndarray,
+ *,
+ skipna: bool = True,
+ axis: Optional[int] = None,
):
"""
Reduction for 1D masked array.
@@ -80,6 +103,7 @@ def _minmax(
Boolean numpy array (True values indicate missing values).
skipna : bool, default True
Whether to skip NA.
+ axis : int, optional, default None
"""
if not skipna:
if mask.any() or not values.size:
@@ -96,14 +120,27 @@ def _minmax(
return libmissing.NA
-def min(values: np.ndarray, mask: np.ndarray, *, skipna: bool = True):
- return _minmax(np.min, values=values, mask=mask, skipna=skipna)
+def min(
+ values: np.ndarray,
+ mask: np.ndarray,
+ *,
+ skipna: bool = True,
+ axis: Optional[int] = None,
+):
+ return _minmax(np.min, values=values, mask=mask, skipna=skipna, axis=axis)
-def max(values: np.ndarray, mask: np.ndarray, *, skipna: bool = True):
- return _minmax(np.max, values=values, mask=mask, skipna=skipna)
+def max(
+ values: np.ndarray,
+ mask: np.ndarray,
+ *,
+ skipna: bool = True,
+ axis: Optional[int] = None,
+):
+ return _minmax(np.max, values=values, mask=mask, skipna=skipna, axis=axis)
+# TODO: axis kwarg
def mean(values: np.ndarray, mask: np.ndarray, skipna: bool = True):
if not values.size or mask.all():
return libmissing.NA
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index e43e66fed8957..3769c686da029 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -298,27 +298,6 @@ def _wrap_reduction_result(self, axis: int | None, result):
return self._box_func(result)
return self._from_backing_data(result)
- # ------------------------------------------------------------------------
-
- def __repr__(self) -> str:
- if self.ndim == 1:
- return super().__repr__()
-
- from pandas.io.formats.printing import format_object_summary
-
- # the short repr has no trailing newline, while the truncated
- # repr does. So we include a newline in our template, and strip
- # any trailing newlines from format_object_summary
- lines = [
- format_object_summary(x, self._formatter(), indent_for_name=False).rstrip(
- ", \n"
- )
- for x in self
- ]
- data = ",\n".join(lines)
- class_name = f"<{type(self).__name__}>"
- return f"{class_name}\n[\n{data}\n]\nShape: {self.shape}, dtype: {self.dtype}"
-
# ------------------------------------------------------------------------
# __array_function__ methods
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 99c4944a1cfa7..bf54f7166e14d 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -1209,6 +1209,9 @@ def view(self, dtype: Dtype | None = None) -> ArrayLike:
# ------------------------------------------------------------------------
def __repr__(self) -> str:
+ if self.ndim > 1:
+ return self._repr_2d()
+
from pandas.io.formats.printing import format_object_summary
# the short repr has no trailing newline, while the truncated
@@ -1220,6 +1223,22 @@ def __repr__(self) -> str:
class_name = f"<{type(self).__name__}>\n"
return f"{class_name}{data}\nLength: {len(self)}, dtype: {self.dtype}"
+ def _repr_2d(self) -> str:
+ from pandas.io.formats.printing import format_object_summary
+
+ # the short repr has no trailing newline, while the truncated
+ # repr does. So we include a newline in our template, and strip
+ # any trailing newlines from format_object_summary
+ lines = [
+ format_object_summary(x, self._formatter(), indent_for_name=False).rstrip(
+ ", \n"
+ )
+ for x in self
+ ]
+ data = ",\n".join(lines)
+ class_name = f"<{type(self).__name__}>"
+ return f"{class_name}\n[\n{data}\n]\nShape: {self.shape}, dtype: {self.dtype}"
+
def _formatter(self, boxed: bool = False) -> Callable[[Any], str | None]:
"""
Formatting function for scalar values.
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py
index 1df7c191bdb68..58e7abbbe1ddd 100644
--- a/pandas/core/arrays/boolean.py
+++ b/pandas/core/arrays/boolean.py
@@ -21,6 +21,7 @@
npt,
type_t,
)
+from pandas.compat.numpy import function as nv
from pandas.core.dtypes.common import (
is_bool_dtype,
@@ -245,10 +246,8 @@ def coerce_to_array(
if mask_values is not None:
mask = mask | mask_values
- if values.ndim != 1:
- raise ValueError("values must be a 1D list-like")
- if mask.ndim != 1:
- raise ValueError("mask must be a 1D list-like")
+ if values.shape != mask.shape:
+ raise ValueError("values.shape and mask.shape must match")
return values, mask
@@ -447,6 +446,144 @@ def _values_for_argsort(self) -> np.ndarray:
data[self._mask] = -1
return data
+ def any(self, *, skipna: bool = True, axis: int | None = 0, **kwargs):
+ """
+ Return whether any element is True.
+
+ Returns False unless there is at least one element that is True.
+ By default, NAs are skipped. If ``skipna=False`` is specified and
+ missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
+ is used as for logical operations.
+
+ Parameters
+ ----------
+ skipna : bool, default True
+ Exclude NA values. If the entire array is NA and `skipna` is
+ True, then the result will be False, as for an empty array.
+ If `skipna` is False, the result will still be True if there is
+ at least one element that is True, otherwise NA will be returned
+ if there are NA's present.
+ axis : int or None, default 0
+ **kwargs : any, default None
+ Additional keywords have no effect but might be accepted for
+ compatibility with NumPy.
+
+ Returns
+ -------
+ bool or :attr:`pandas.NA`
+
+ See Also
+ --------
+ numpy.any : Numpy version of this method.
+ BooleanArray.all : Return whether all elements are True.
+
+ Examples
+ --------
+ The result indicates whether any element is True (and by default
+ skips NAs):
+
+ >>> pd.array([True, False, True]).any()
+ True
+ >>> pd.array([True, False, pd.NA]).any()
+ True
+ >>> pd.array([False, False, pd.NA]).any()
+ False
+ >>> pd.array([], dtype="boolean").any()
+ False
+ >>> pd.array([pd.NA], dtype="boolean").any()
+ False
+
+ With ``skipna=False``, the result can be NA if this is logically
+ required (whether ``pd.NA`` is True or False influences the result):
+
+ >>> pd.array([True, False, pd.NA]).any(skipna=False)
+ True
+ >>> pd.array([False, False, pd.NA]).any(skipna=False)
+ <NA>
+ """
+ kwargs.pop("axis", None)
+ nv.validate_any((), kwargs)
+
+ values = self._data.copy()
+ np.putmask(values, self._mask, False)
+ result = values.any(axis=axis)
+
+ if skipna:
+ return result
+ else:
+ if result or self.size == 0 or not self._mask.any():
+ return result
+ else:
+ return self.dtype.na_value
+
+ def all(self, *, skipna: bool = True, axis: int | None = 0, **kwargs):
+ """
+ Return whether all elements are True.
+
+ Returns True unless there is at least one element that is False.
+ By default, NAs are skipped. If ``skipna=False`` is specified and
+ missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
+ is used as for logical operations.
+
+ Parameters
+ ----------
+ skipna : bool, default True
+ Exclude NA values. If the entire array is NA and `skipna` is
+ True, then the result will be True, as for an empty array.
+ If `skipna` is False, the result will still be False if there is
+ at least one element that is False, otherwise NA will be returned
+ if there are NA's present.
+ axis : int or None, default 0
+ **kwargs : any, default None
+ Additional keywords have no effect but might be accepted for
+ compatibility with NumPy.
+
+ Returns
+ -------
+ bool or :attr:`pandas.NA`
+
+ See Also
+ --------
+ numpy.all : Numpy version of this method.
+ BooleanArray.any : Return whether any element is True.
+
+ Examples
+ --------
+ The result indicates whether any element is True (and by default
+ skips NAs):
+
+ >>> pd.array([True, True, pd.NA]).all()
+ True
+ >>> pd.array([True, False, pd.NA]).all()
+ False
+ >>> pd.array([], dtype="boolean").all()
+ True
+ >>> pd.array([pd.NA], dtype="boolean").all()
+ True
+
+ With ``skipna=False``, the result can be NA if this is logically
+ required (whether ``pd.NA`` is True or False influences the result):
+
+ >>> pd.array([True, True, pd.NA]).all(skipna=False)
+ <NA>
+ >>> pd.array([True, False, pd.NA]).all(skipna=False)
+ False
+ """
+ kwargs.pop("axis", None)
+ nv.validate_all((), kwargs)
+
+ values = self._data.copy()
+ np.putmask(values, self._mask, True)
+ result = values.all(axis=axis)
+
+ if skipna:
+ return result
+ else:
+ if not result or self.size == 0 or not self._mask.any():
+ return result
+ else:
+ return self.dtype.na_value
+
def _logical_method(self, other, op):
assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"}
diff --git a/pandas/core/arrays/floating.py b/pandas/core/arrays/floating.py
index 066f6ebdfcaa6..6d6cc03a1c83e 100644
--- a/pandas/core/arrays/floating.py
+++ b/pandas/core/arrays/floating.py
@@ -385,21 +385,21 @@ def _cmp_method(self, other, op):
return BooleanArray(result, mask)
- def sum(self, *, skipna=True, min_count=0, **kwargs):
+ def sum(self, *, skipna=True, min_count=0, axis: int | None = 0, **kwargs):
nv.validate_sum((), kwargs)
- return super()._reduce("sum", skipna=skipna, min_count=min_count)
+ return super()._reduce("sum", skipna=skipna, min_count=min_count, axis=axis)
- def prod(self, *, skipna=True, min_count=0, **kwargs):
+ def prod(self, *, skipna=True, min_count=0, axis: int | None = 0, **kwargs):
nv.validate_prod((), kwargs)
- return super()._reduce("prod", skipna=skipna, min_count=min_count)
+ return super()._reduce("prod", skipna=skipna, min_count=min_count, axis=axis)
- def min(self, *, skipna=True, **kwargs):
+ def min(self, *, skipna=True, axis: int | None = 0, **kwargs):
nv.validate_min((), kwargs)
- return super()._reduce("min", skipna=skipna)
+ return super()._reduce("min", skipna=skipna, axis=axis)
- def max(self, *, skipna=True, **kwargs):
+ def max(self, *, skipna=True, axis: int | None = 0, **kwargs):
nv.validate_max((), kwargs)
- return super()._reduce("max", skipna=skipna)
+ return super()._reduce("max", skipna=skipna, axis=axis)
def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 078adeb11d3fb..4d59832655162 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -458,21 +458,21 @@ def _cmp_method(self, other, op):
return BooleanArray(result, mask)
- def sum(self, *, skipna=True, min_count=0, **kwargs):
+ def sum(self, *, skipna=True, min_count=0, axis: int | None = 0, **kwargs):
nv.validate_sum((), kwargs)
- return super()._reduce("sum", skipna=skipna, min_count=min_count)
+ return super()._reduce("sum", skipna=skipna, min_count=min_count, axis=axis)
- def prod(self, *, skipna=True, min_count=0, **kwargs):
+ def prod(self, *, skipna=True, min_count=0, axis: int | None = 0, **kwargs):
nv.validate_prod((), kwargs)
- return super()._reduce("prod", skipna=skipna, min_count=min_count)
+ return super()._reduce("prod", skipna=skipna, min_count=min_count, axis=axis)
- def min(self, *, skipna=True, **kwargs):
+ def min(self, *, skipna=True, axis: int | None = 0, **kwargs):
nv.validate_min((), kwargs)
- return super()._reduce("min", skipna=skipna)
+ return super()._reduce("min", skipna=skipna, axis=axis)
- def max(self, *, skipna=True, **kwargs):
+ def max(self, *, skipna=True, axis: int | None = 0, **kwargs):
nv.validate_max((), kwargs)
- return super()._reduce("max", skipna=skipna)
+ return super()._reduce("max", skipna=skipna, axis=axis)
def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 6a03456673604..0247cd717edec 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -22,6 +22,7 @@
Scalar,
ScalarIndexer,
SequenceIndexer,
+ Shape,
npt,
type_t,
)
@@ -34,10 +35,10 @@
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.common import (
+ is_bool,
is_bool_dtype,
is_dtype_equal,
is_float_dtype,
- is_integer,
is_integer_dtype,
is_object_dtype,
is_scalar,
@@ -120,6 +121,10 @@ class BaseMaskedArray(OpsMixin, ExtensionArray):
# The value used to fill '_data' to avoid upcasting
_internal_fill_value: Scalar
+ # our underlying data and mask are each ndarrays
+ _data: np.ndarray
+ _mask: np.ndarray
+
# Fill values used for any/all
_truthy_value = Scalar # bool(_truthy_value) = True
_falsey_value = Scalar # bool(_falsey_value) = False
@@ -131,12 +136,8 @@ def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
"mask should be boolean numpy array. Use "
"the 'pd.array' function instead"
)
- if values.ndim != 1:
- raise ValueError("values must be a 1D array")
- if mask.ndim != 1:
- raise ValueError("mask must be a 1D array")
if values.shape != mask.shape:
- raise ValueError("values and mask must have same shape")
+ raise ValueError("values.shape must match mask.shape")
if copy:
values = values.copy()
@@ -160,14 +161,16 @@ def __getitem__(self: BaseMaskedArrayT, item: SequenceIndexer) -> BaseMaskedArra
def __getitem__(
self: BaseMaskedArrayT, item: PositionalIndexer
) -> BaseMaskedArrayT | Any:
- if is_integer(item):
- if self._mask[item]:
+ item = check_array_indexer(self, item)
+
+ newmask = self._mask[item]
+ if is_bool(newmask):
+ # This is a scalar indexing
+ if newmask:
return self.dtype.na_value
return self._data[item]
- item = check_array_indexer(self, item)
-
- return type(self)(self._data[item], self._mask[item])
+ return type(self)(self._data[item], newmask)
@doc(ExtensionArray.fillna)
def fillna(
@@ -187,13 +190,13 @@ def fillna(
if mask.any():
if method is not None:
- func = missing.get_fill_func(method)
+ func = missing.get_fill_func(method, ndim=self.ndim)
new_values, new_mask = func(
- self._data.copy(),
+ self._data.copy().T,
limit=limit,
- mask=mask.copy(),
+ mask=mask.copy().T,
)
- return type(self)(new_values, new_mask.view(np.bool_))
+ return type(self)(new_values.T, new_mask.view(np.bool_).T)
else:
# fill with value
new_values = self.copy()
@@ -220,15 +223,52 @@ def __setitem__(self, key, value) -> None:
self._mask[key] = mask
def __iter__(self):
- for i in range(len(self)):
- if self._mask[i]:
- yield self.dtype.na_value
- else:
- yield self._data[i]
+ if self.ndim == 1:
+ for i in range(len(self)):
+ if self._mask[i]:
+ yield self.dtype.na_value
+ else:
+ yield self._data[i]
+ else:
+ for i in range(len(self)):
+ yield self[i]
def __len__(self) -> int:
return len(self._data)
+ @property
+ def shape(self) -> Shape:
+ return self._data.shape
+
+ @property
+ def ndim(self) -> int:
+ return self._data.ndim
+
+ def swapaxes(self: BaseMaskedArrayT, axis1, axis2) -> BaseMaskedArrayT:
+ data = self._data.swapaxes(axis1, axis2)
+ mask = self._mask.swapaxes(axis1, axis2)
+ return type(self)(data, mask)
+
+ def delete(self: BaseMaskedArrayT, loc, axis: int = 0) -> BaseMaskedArrayT:
+ data = np.delete(self._data, loc, axis=axis)
+ mask = np.delete(self._mask, loc, axis=axis)
+ return type(self)(data, mask)
+
+ def reshape(self: BaseMaskedArrayT, *args, **kwargs) -> BaseMaskedArrayT:
+ data = self._data.reshape(*args, **kwargs)
+ mask = self._mask.reshape(*args, **kwargs)
+ return type(self)(data, mask)
+
+ def ravel(self: BaseMaskedArrayT, *args, **kwargs) -> BaseMaskedArrayT:
+ # TODO: need to make sure we have the same order for data/mask
+ data = self._data.ravel(*args, **kwargs)
+ mask = self._mask.ravel(*args, **kwargs)
+ return type(self)(data, mask)
+
+ @property
+ def T(self: BaseMaskedArrayT) -> BaseMaskedArrayT:
+ return type(self)(self._data.T, self._mask.T)
+
def __invert__(self: BaseMaskedArrayT) -> BaseMaskedArrayT:
return type(self)(~self._data, self._mask.copy())
@@ -454,10 +494,12 @@ def nbytes(self) -> int:
@classmethod
def _concat_same_type(
- cls: type[BaseMaskedArrayT], to_concat: Sequence[BaseMaskedArrayT]
+ cls: type[BaseMaskedArrayT],
+ to_concat: Sequence[BaseMaskedArrayT],
+ axis: int = 0,
) -> BaseMaskedArrayT:
- data = np.concatenate([x._data for x in to_concat])
- mask = np.concatenate([x._mask for x in to_concat])
+ data = np.concatenate([x._data for x in to_concat], axis=axis)
+ mask = np.concatenate([x._mask for x in to_concat], axis=axis)
return cls(data, mask)
def take(
@@ -466,15 +508,22 @@ def take(
*,
allow_fill: bool = False,
fill_value: Scalar | None = None,
+ axis: int = 0,
) -> BaseMaskedArrayT:
# we always fill with 1 internally
# to avoid upcasting
data_fill_value = self._internal_fill_value if isna(fill_value) else fill_value
result = take(
- self._data, indexer, fill_value=data_fill_value, allow_fill=allow_fill
+ self._data,
+ indexer,
+ fill_value=data_fill_value,
+ allow_fill=allow_fill,
+ axis=axis,
)
- mask = take(self._mask, indexer, fill_value=True, allow_fill=allow_fill)
+ mask = take(
+ self._mask, indexer, fill_value=True, allow_fill=allow_fill, axis=axis
+ )
# if we are filling
# we only fill where the indexer is null
@@ -593,7 +642,8 @@ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
if name in {"sum", "prod", "min", "max", "mean"}:
op = getattr(masked_reductions, name)
- return op(data, mask, skipna=skipna, **kwargs)
+ result = op(data, mask, skipna=skipna, **kwargs)
+ return result
# coerce to a nan-aware float if needed
# (we explicitly use NaN within reductions)
diff --git a/pandas/core/arrays/numeric.py b/pandas/core/arrays/numeric.py
index c5301a3bd3683..e1990dc064a84 100644
--- a/pandas/core/arrays/numeric.py
+++ b/pandas/core/arrays/numeric.py
@@ -152,6 +152,18 @@ def _arith_method(self, other, op):
_HANDLED_TYPES = (np.ndarray, numbers.Number)
+ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
+ result = super()._reduce(name, skipna=skipna, **kwargs)
+ if isinstance(result, np.ndarray):
+ axis = kwargs["axis"]
+ if skipna:
+ # we only retain mask for all-NA rows/columns
+ mask = self._mask.all(axis=axis)
+ else:
+ mask = self._mask.any(axis=axis)
+ return type(self)(result, mask=mask)
+ return result
+
def __neg__(self):
return type(self)(-self._data, self._mask.copy())
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index 8d150c8f6ad3d..d93fa4bbdd7fc 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -319,7 +319,9 @@ def __init__(self, values, copy=False):
def _validate(self):
"""Validate that we only store NA or strings."""
- if len(self._ndarray) and not lib.is_string_array(self._ndarray, skipna=True):
+ if len(self._ndarray) and not lib.is_string_array(
+ self._ndarray.ravel("K"), skipna=True
+ ):
raise ValueError("StringArray requires a sequence of strings or pandas.NA")
if self._ndarray.dtype != "object":
raise ValueError(
@@ -447,9 +449,11 @@ def astype(self, dtype, copy=True):
return super().astype(dtype, copy)
- def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
+ def _reduce(
+ self, name: str, *, skipna: bool = True, axis: int | None = 0, **kwargs
+ ):
if name in ["min", "max"]:
- return getattr(self, name)(skipna=skipna)
+ return getattr(self, name)(skipna=skipna, axis=axis)
raise TypeError(f"Cannot perform reduction '{name}' with string dtype")
diff --git a/pandas/tests/arrays/boolean/test_construction.py b/pandas/tests/arrays/boolean/test_construction.py
index c9e96c437964f..f080bf7e03412 100644
--- a/pandas/tests/arrays/boolean/test_construction.py
+++ b/pandas/tests/arrays/boolean/test_construction.py
@@ -27,10 +27,10 @@ def test_boolean_array_constructor():
with pytest.raises(TypeError, match="mask should be boolean numpy array"):
BooleanArray(values, None)
- with pytest.raises(ValueError, match="values must be a 1D array"):
+ with pytest.raises(ValueError, match="values.shape must match mask.shape"):
BooleanArray(values.reshape(1, -1), mask)
- with pytest.raises(ValueError, match="mask must be a 1D array"):
+ with pytest.raises(ValueError, match="values.shape must match mask.shape"):
BooleanArray(values, mask.reshape(1, -1))
@@ -183,10 +183,10 @@ def test_coerce_to_array():
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
- with pytest.raises(ValueError, match="values must be a 1D list-like"):
+ with pytest.raises(ValueError, match="values.shape and mask.shape must match"):
coerce_to_array(values.reshape(1, -1))
- with pytest.raises(ValueError, match="mask must be a 1D list-like"):
+ with pytest.raises(ValueError, match="values.shape and mask.shape must match"):
coerce_to_array(values, mask=mask.reshape(1, -1))
diff --git a/pandas/tests/extension/base/dim2.py b/pandas/tests/extension/base/dim2.py
index b80d2a3586b3b..b4a817cbc37ec 100644
--- a/pandas/tests/extension/base/dim2.py
+++ b/pandas/tests/extension/base/dim2.py
@@ -4,6 +4,11 @@
import numpy as np
import pytest
+from pandas.compat import (
+ IS64,
+ is_platform_windows,
+)
+
import pandas as pd
from pandas.tests.extension.base.base import BaseExtensionTests
@@ -194,9 +199,23 @@ def test_reductions_2d_axis0(self, data, method, request):
if method in ["sum", "prod"] and data.dtype.kind in ["i", "u"]:
# FIXME: kludge
if data.dtype.kind == "i":
- dtype = pd.Int64Dtype()
+ if is_platform_windows() or not IS64:
+ # FIXME: kludge for 32bit builds
+ if result.dtype.itemsize == 4:
+ dtype = pd.Int32Dtype()
+ else:
+ dtype = pd.Int64Dtype()
+ else:
+ dtype = pd.Int64Dtype()
else:
- dtype = pd.UInt64Dtype()
+ if is_platform_windows() or not IS64:
+ # FIXME: kludge for 32bit builds
+ if result.dtype.itemsize == 4:
+ dtype = pd.UInt32Dtype()
+ else:
+ dtype = pd.UInt64Dtype()
+ else:
+ dtype = pd.UInt64Dtype()
expected = data.astype(dtype)
assert type(expected) == type(data), type(expected)
diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py
index 9260c342caa6b..9c4bf76b27c14 100644
--- a/pandas/tests/extension/test_boolean.py
+++ b/pandas/tests/extension/test_boolean.py
@@ -393,3 +393,7 @@ class TestUnaryOps(base.BaseUnaryOpsTests):
class TestParsing(base.BaseParsingTests):
pass
+
+
+class Test2DCompat(base.Dim2CompatTests):
+ pass
diff --git a/pandas/tests/extension/test_floating.py b/pandas/tests/extension/test_floating.py
index 173bc2d05af2f..500c2fbb74d17 100644
--- a/pandas/tests/extension/test_floating.py
+++ b/pandas/tests/extension/test_floating.py
@@ -223,3 +223,7 @@ class TestPrinting(base.BasePrintingTests):
class TestParsing(base.BaseParsingTests):
pass
+
+
+class Test2DCompat(base.Dim2CompatTests):
+ pass
diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py
index 2cf4f8e415770..344b0be20fc7b 100644
--- a/pandas/tests/extension/test_integer.py
+++ b/pandas/tests/extension/test_integer.py
@@ -254,3 +254,7 @@ class TestPrinting(base.BasePrintingTests):
class TestParsing(base.BaseParsingTests):
pass
+
+
+class Test2DCompat(base.Dim2CompatTests):
+ pass
diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py
index 3d0edb70d1ced..af86c359c4c00 100644
--- a/pandas/tests/extension/test_string.py
+++ b/pandas/tests/extension/test_string.py
@@ -19,6 +19,7 @@
import pytest
import pandas as pd
+from pandas.core.arrays import ArrowStringArray
from pandas.core.arrays.string_ import StringDtype
from pandas.tests.extension import base
@@ -186,3 +187,13 @@ class TestPrinting(base.BasePrintingTests):
class TestGroupBy(base.BaseGroupbyTests):
pass
+
+
+class Test2DCompat(base.Dim2CompatTests):
+ @pytest.fixture(autouse=True)
+ def arrow_not_supported(self, data, request):
+ if isinstance(data, ArrowStringArray):
+ mark = pytest.mark.xfail(
+ reason="2D support not implemented for ArrowStringArray"
+ )
+ request.node.add_marker(mark)
| This doesn't in any way _use_ the 2D support, but opens up the option of incrementally fleshing out the tests. | https://api.github.com/repos/pandas-dev/pandas/pulls/38992 | 2021-01-06T02:55:49Z | 2021-10-16T17:58:42Z | 2021-10-16T17:58:42Z | 2022-04-15T20:39:23Z |
Backport PR #38987 on branch 1.2.x (Fix bug on master) | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index eba097cd8c345..a78af82ba4db8 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -2430,16 +2430,14 @@ Read a URL with no options:
.. ipython:: python
- url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
+ url = (
+ "https://raw.githubusercontent.com/pandas-dev/pandas/master/"
+ "pandas/tests/io/data/html/spam.html"
+ )
dfs = pd.read_html(url)
dfs
-.. note::
-
- The data from the above URL changes every Monday so the resulting data above
- and the data below may be slightly different.
-
-Read in the content of the file from the above URL and pass it to ``read_html``
+Read in the content of the "banklist.html" file and pass it to ``read_html``
as a string:
.. ipython:: python
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index ba8b1a8a0679d..aed1aaedf2fa3 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -129,6 +129,7 @@ def test_to_html_compat(self):
res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
tm.assert_frame_equal(res, df)
+ @pytest.mark.xfail(reason="Html file was removed")
@tm.network
def test_banklist_url_positional_match(self):
url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
@@ -142,6 +143,7 @@ def test_banklist_url_positional_match(self):
assert_framelist_equal(df1, df2)
+ @pytest.mark.xfail(reason="Html file was removed")
@tm.network
def test_banklist_url(self):
url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
| Backport PR #38987: Fix bug on master | https://api.github.com/repos/pandas-dev/pandas/pulls/38991 | 2021-01-06T02:53:25Z | 2021-01-06T12:53:02Z | 2021-01-06T12:53:02Z | 2021-01-06T12:53:03Z |
Fix bug on master | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 9c9ad9538f488..01235958c5b22 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -2444,16 +2444,14 @@ Read a URL with no options:
.. ipython:: python
- url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
+ url = (
+ "https://raw.githubusercontent.com/pandas-dev/pandas/master/"
+ "pandas/tests/io/data/html/spam.html"
+ )
dfs = pd.read_html(url)
dfs
-.. note::
-
- The data from the above URL changes every Monday so the resulting data above
- and the data below may be slightly different.
-
-Read in the content of the file from the above URL and pass it to ``read_html``
+Read in the content of the "banklist.html" file and pass it to ``read_html``
as a string:
.. ipython:: python
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index e10cb10ca66c4..7b762e4891c14 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -129,6 +129,7 @@ def test_to_html_compat(self):
res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
tm.assert_frame_equal(res, df)
+ @pytest.mark.xfail(reason="Html file was removed")
@tm.network
def test_banklist_url_positional_match(self):
url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
@@ -142,6 +143,7 @@ def test_banklist_url_positional_match(self):
assert_framelist_equal(df1, df2)
+ @pytest.mark.xfail(reason="Html file was removed")
@tm.network
def test_banklist_url(self):
url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
| - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
The banklist file was removed and replaced with banklist.csv. This causes failures on master.
This is just a temporary fix for the user guide.
cc @jreback
| https://api.github.com/repos/pandas-dev/pandas/pulls/38987 | 2021-01-06T02:02:54Z | 2021-01-06T02:52:33Z | 2021-01-06T02:52:33Z | 2021-01-06T02:53:30Z |
BUG: Datetimelike equality comparisons with Categorical | diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index 7c093ebe00959..81bcff410a4d3 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -85,6 +85,20 @@ def test_compare_len1_raises(self):
with pytest.raises(ValueError, match="Lengths must match"):
idx <= idx[[0]]
+ @pytest.mark.parametrize(
+ "result",
+ [
+ pd.date_range("2020", periods=3),
+ pd.date_range("2020", periods=3, tz="UTC"),
+ pd.timedelta_range("0 days", periods=3),
+ pd.period_range("2020Q1", periods=3, freq="Q"),
+ ],
+ )
+ def test_compare_with_Categorical(self, result):
+ expected = pd.Categorical(result)
+ assert all(result == expected)
+ assert not any(result != expected)
+
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("as_index", [True, False])
def test_compare_categorical_dtype(self, arr1d, as_index, reverse, ordered):
| - [ ] closes #30699
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38986 | 2021-01-06T01:37:23Z | 2021-01-08T14:10:03Z | 2021-01-08T14:10:02Z | 2021-01-08T14:10:07Z |
REF: de-duplication in libperiod | diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index f0d21a3a7a957..5d3ad559ea718 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -357,18 +357,15 @@ cdef int64_t asfreq_QtoDT(int64_t ordinal, asfreq_info *af_info) nogil:
return upsample_daytime(unix_date, af_info)
-cdef void MtoD_ym(int64_t ordinal, int *year, int *month) nogil:
- year[0] = ordinal // 12 + 1970
- month[0] = ordinal % 12 + 1
-
-
cdef int64_t asfreq_MtoDT(int64_t ordinal, asfreq_info *af_info) nogil:
cdef:
int64_t unix_date
int year, month
ordinal += af_info.is_end
- MtoD_ym(ordinal, &year, &month)
+
+ year = ordinal // 12 + 1970
+ month = ordinal % 12 + 1
unix_date = unix_date_from_ymd(year, month, 1)
unix_date -= af_info.is_end
@@ -449,10 +446,7 @@ cdef int64_t asfreq_DTtoA(int64_t ordinal, asfreq_info *af_info) nogil:
ordinal = downsample_daytime(ordinal, af_info)
pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts)
- if dts.month > af_info.to_end:
- return <int64_t>(dts.year + 1 - 1970)
- else:
- return <int64_t>(dts.year - 1970)
+ return dts_to_year_ordinal(&dts, af_info.to_end)
cdef int DtoQ_yq(int64_t ordinal, asfreq_info *af_info, npy_datetimestruct* dts) nogil:
@@ -483,7 +477,7 @@ cdef int64_t asfreq_DTtoM(int64_t ordinal, asfreq_info *af_info) nogil:
ordinal = downsample_daytime(ordinal, af_info)
pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts)
- return <int64_t>((dts.year - 1970) * 12 + dts.month - 1)
+ return dts_to_month_ordinal(&dts)
cdef int64_t asfreq_DTtoW(int64_t ordinal, asfreq_info *af_info) nogil:
@@ -716,6 +710,40 @@ cdef int64_t unix_date_from_ymd(int year, int month, int day) nogil:
return unix_date
+cdef inline int64_t dts_to_month_ordinal(npy_datetimestruct* dts) nogil:
+ # AKA: use npy_datetimestruct_to_datetime(NPY_FR_M, &dts)
+ return <int64_t>((dts.year - 1970) * 12 + dts.month - 1)
+
+
+cdef inline int64_t dts_to_year_ordinal(npy_datetimestruct *dts, int to_end) nogil:
+ cdef:
+ int64_t result
+
+ result = npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT.NPY_FR_Y, dts)
+ if dts.month > to_end:
+ return result + 1
+ else:
+ return result
+
+
+cdef inline int64_t dts_to_qtr_ordinal(npy_datetimestruct* dts, int to_end) nogil:
+ cdef:
+ int quarter
+
+ adjust_dts_for_qtr(dts, to_end)
+ quarter = month_to_quarter(dts.month)
+ return <int64_t>((dts.year - 1970) * 4 + quarter - 1)
+
+
+cdef inline int get_anchor_month(int freq, int freq_group) nogil:
+ cdef:
+ int fmonth
+ fmonth = freq - freq_group
+ if fmonth == 0:
+ fmonth = 12
+ return fmonth
+
+
# specifically _dont_ use cdvision or else ordinals near -1 are assigned to
# incorrect dates GH#19643
@cython.cdivision(False)
@@ -740,23 +768,12 @@ cdef int64_t get_period_ordinal(npy_datetimestruct *dts, int freq) nogil:
freq_group = get_freq_group(freq)
if freq_group == FR_ANN:
- fmonth = freq - FR_ANN
- if fmonth == 0:
- fmonth = 12
-
- mdiff = dts.month - fmonth
- if mdiff <= 0:
- return dts.year - 1970
- else:
- return dts.year - 1970 + 1
+ fmonth = get_anchor_month(freq, freq_group)
+ return dts_to_year_ordinal(dts, fmonth)
elif freq_group == FR_QTR:
- fmonth = freq - FR_QTR
- if fmonth == 0:
- fmonth = 12
-
- mdiff = dts.month - fmonth + 12
- return (dts.year - 1970) * 4 + (mdiff - 1) // 3
+ fmonth = get_anchor_month(freq, freq_group)
+ return dts_to_qtr_ordinal(dts, fmonth)
elif freq_group == FR_WK:
unix_date = npy_datetimestruct_to_datetime(NPY_FR_D, dts)
| https://api.github.com/repos/pandas-dev/pandas/pulls/38985 | 2021-01-06T01:01:31Z | 2021-01-06T18:34:47Z | 2021-01-06T18:34:47Z | 2021-01-06T18:44:48Z | |
BUG: MultiIndex.intersection duplicating nans in result | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 38b7a1d13c253..cbf0d4a4d708b 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -260,6 +260,7 @@ MultiIndex
^^^^^^^^^^
- Bug in :meth:`DataFrame.drop` raising ``TypeError`` when :class:`MultiIndex` is non-unique and no level is provided (:issue:`36293`)
+- Bug in :meth:`MultiIndex.intersection` duplicating ``NaN`` in result (:issue:`38623`)
- Bug in :meth:`MultiIndex.equals` incorrectly returning ``True`` when :class:`MultiIndex` containing ``NaN`` even when they are differntly ordered (:issue:`38439`)
- Bug in :meth:`MultiIndex.intersection` always returning empty when intersecting with :class:`CategoricalIndex` (:issue:`38653`)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 61b6b7ff19edc..f058645c4abda 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -3578,16 +3578,9 @@ def _intersection(self, other, sort=False):
uniq_tuples = algos.unique(inner_tuples)
if uniq_tuples is None:
- other_uniq = set(rvals)
- seen = set()
- # pandas\core\indexes\multi.py:3503: error: "add" of "set" does not
- # return a value [func-returns-value]
- uniq_tuples = [
- x
- for x in lvals
- if x in other_uniq
- and not (x in seen or seen.add(x)) # type: ignore[func-returns-value]
- ]
+ left_unique = self.drop_duplicates()
+ indexer = left_unique.get_indexer(other.drop_duplicates())
+ uniq_tuples = left_unique.take(np.sort(indexer[indexer != -1]))
if sort is None:
uniq_tuples = sorted(uniq_tuples)
diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py
index d5b29527ee08e..f872315374174 100644
--- a/pandas/tests/indexes/multi/test_setops.py
+++ b/pandas/tests/indexes/multi/test_setops.py
@@ -483,3 +483,12 @@ def test_intersection_different_names():
mi2 = MultiIndex.from_arrays([[1], [3]])
result = mi.intersection(mi2)
tm.assert_index_equal(result, mi2)
+
+
+def test_intersection_with_missing_values_on_both_sides(nulls_fixture):
+ # GH#38623
+ mi1 = MultiIndex.from_arrays([[3, nulls_fixture, 4, nulls_fixture], [1, 2, 4, 2]])
+ mi2 = MultiIndex.from_arrays([[3, nulls_fixture, 3], [1, 2, 4]])
+ result = mi1.intersection(mi2)
+ expected = MultiIndex.from_arrays([[3.0, nulls_fixture], [1, 2]])
+ tm.assert_index_equal(result, expected)
| - [x] xref #38623
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
This also aligns the multiindex implementation with the base implementation a bit more. | https://api.github.com/repos/pandas-dev/pandas/pulls/38984 | 2021-01-06T00:34:12Z | 2021-01-06T14:44:46Z | 2021-01-06T14:44:46Z | 2021-01-06T15:24:33Z |
REGR: Bug fix for ExtensionArray groupby aggregation on non-numeric types | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index 36b4b4fa77c4a..849b599141c2b 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -28,6 +28,7 @@ Fixed regressions
- Fixed regression in :meth:`DataFrame.replace` raising ``ValueError`` when :class:`DataFrame` has dtype ``bytes`` (:issue:`38900`)
- Fixed regression in :meth:`DataFrameGroupBy.diff` raising for ``int8`` and ``int16`` columns (:issue:`39050`)
- Fixed regression that raised ``AttributeError`` with PyArrow versions [0.16.0, 1.0.0) (:issue:`38801`)
+- Fixed regression in :meth:`DataFrame.groupby` when aggregating an :class:`ExtensionDType` that could fail for non-numeric values (:issue:`38980`)
-
-
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 45897666b6ccf..2c0ba5b05c19b 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -540,7 +540,9 @@ def _ea_wrap_cython_operation(
result = type(orig_values)._from_sequence(res_values)
return result
- raise NotImplementedError(values.dtype)
+ raise NotImplementedError(
+ f"function is not implemented for this dtype: {values.dtype}"
+ )
@final
def _cython_operation(
diff --git a/pandas/tests/extension/base/groupby.py b/pandas/tests/extension/base/groupby.py
index 94d0ef7bbea84..c81304695f353 100644
--- a/pandas/tests/extension/base/groupby.py
+++ b/pandas/tests/extension/base/groupby.py
@@ -33,6 +33,22 @@ def test_groupby_extension_agg(self, as_index, data_for_grouping):
expected = expected.reset_index()
self.assert_frame_equal(result, expected)
+ def test_groupby_agg_extension(self, data_for_grouping):
+ # GH#38980 groupby agg on extension type fails for non-numeric types
+ df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
+
+ expected = df.iloc[[0, 2, 4, 7]]
+ expected = expected.set_index("A")
+
+ result = df.groupby("A").agg({"B": "first"})
+ self.assert_frame_equal(result, expected)
+
+ result = df.groupby("A").agg("first")
+ self.assert_frame_equal(result, expected)
+
+ result = df.groupby("A").first()
+ self.assert_frame_equal(result, expected)
+
def test_groupby_extension_no_sort(self, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
result = df.groupby("B", sort=False).A.mean()
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 233b658d29782..08768bda312ba 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -197,6 +197,10 @@ class TestGroupby(BaseDecimal, base.BaseGroupbyTests):
def test_groupby_apply_identity(self, data_for_grouping):
super().test_groupby_apply_identity(data_for_grouping)
+ @pytest.mark.xfail(reason="GH#39098: Converts agg result to object")
+ def test_groupby_agg_extension(self, data_for_grouping):
+ super().test_groupby_agg_extension(data_for_grouping)
+
class TestSetitem(BaseDecimal, base.BaseSetitemTests):
pass
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 3a5e49796c53b..164a39498ec73 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -313,6 +313,10 @@ def test_groupby_extension_apply(self):
def test_groupby_extension_agg(self, as_index, data_for_grouping):
super().test_groupby_extension_agg(as_index, data_for_grouping)
+ @pytest.mark.xfail(reason="GH#39098: Converts agg result to object")
+ def test_groupby_agg_extension(self, data_for_grouping):
+ super().test_groupby_agg_extension(data_for_grouping)
+
class TestArithmeticOps(BaseJSON, base.BaseArithmeticOpsTests):
def test_error(self, data, all_arithmetic_operators):
diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py
index ced7ea9261310..86a0bc9213256 100644
--- a/pandas/tests/extension/test_boolean.py
+++ b/pandas/tests/extension/test_boolean.py
@@ -291,6 +291,22 @@ def test_groupby_extension_agg(self, as_index, data_for_grouping):
expected = expected.reset_index()
self.assert_frame_equal(result, expected)
+ def test_groupby_agg_extension(self, data_for_grouping):
+ # GH#38980 groupby agg on extension type fails for non-numeric types
+ df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": data_for_grouping})
+
+ expected = df.iloc[[0, 2, 4]]
+ expected = expected.set_index("A")
+
+ result = df.groupby("A").agg({"B": "first"})
+ self.assert_frame_equal(result, expected)
+
+ result = df.groupby("A").agg("first")
+ self.assert_frame_equal(result, expected)
+
+ result = df.groupby("A").first()
+ self.assert_frame_equal(result, expected)
+
def test_groupby_extension_no_sort(self, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": data_for_grouping})
result = df.groupby("B", sort=False).A.mean()
| - [X] closes #38980
- [x] tests added / passed
- [X] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38982 | 2021-01-05T23:02:23Z | 2021-01-13T13:18:40Z | 2021-01-13T13:18:40Z | 2021-01-14T19:08:56Z |
CLN: Multiindex tests | diff --git a/pandas/tests/indexes/multi/test_constructors.py b/pandas/tests/indexes/multi/test_constructors.py
index 7666e9670e6a6..11687b535d2b7 100644
--- a/pandas/tests/indexes/multi/test_constructors.py
+++ b/pandas/tests/indexes/multi/test_constructors.py
@@ -189,37 +189,24 @@ def test_from_arrays_tuples(idx):
tm.assert_index_equal(result, idx)
-def test_from_arrays_index_series_datetimetz():
- idx1 = date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern")
- idx2 = date_range("2015-01-01 10:00", freq="H", periods=3, tz="Asia/Tokyo")
- result = MultiIndex.from_arrays([idx1, idx2])
- tm.assert_index_equal(result.get_level_values(0), idx1)
- tm.assert_index_equal(result.get_level_values(1), idx2)
-
- result2 = MultiIndex.from_arrays([Series(idx1), Series(idx2)])
- tm.assert_index_equal(result2.get_level_values(0), idx1)
- tm.assert_index_equal(result2.get_level_values(1), idx2)
-
- tm.assert_index_equal(result, result2)
-
-
-def test_from_arrays_index_series_timedelta():
- idx1 = pd.timedelta_range("1 days", freq="D", periods=3)
- idx2 = pd.timedelta_range("2 hours", freq="H", periods=3)
- result = MultiIndex.from_arrays([idx1, idx2])
- tm.assert_index_equal(result.get_level_values(0), idx1)
- tm.assert_index_equal(result.get_level_values(1), idx2)
-
- result2 = MultiIndex.from_arrays([Series(idx1), Series(idx2)])
- tm.assert_index_equal(result2.get_level_values(0), idx1)
- tm.assert_index_equal(result2.get_level_values(1), idx2)
-
- tm.assert_index_equal(result, result2)
-
-
-def test_from_arrays_index_series_period():
- idx1 = pd.period_range("2011-01-01", freq="D", periods=3)
- idx2 = pd.period_range("2015-01-01", freq="H", periods=3)
+@pytest.mark.parametrize(
+ ("idx1", "idx2"),
+ [
+ (
+ pd.period_range("2011-01-01", freq="D", periods=3),
+ pd.period_range("2015-01-01", freq="H", periods=3),
+ ),
+ (
+ date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern"),
+ date_range("2015-01-01 10:00", freq="H", periods=3, tz="Asia/Tokyo"),
+ ),
+ (
+ pd.timedelta_range("1 days", freq="D", periods=3),
+ pd.timedelta_range("2 hours", freq="H", periods=3),
+ ),
+ ],
+)
+def test_from_arrays_index_series_period_datetimetz_and_timedelta(idx1, idx2):
result = MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
diff --git a/pandas/tests/indexes/multi/test_copy.py b/pandas/tests/indexes/multi/test_copy.py
index 8dc8572493444..7ec3df9fee0e5 100644
--- a/pandas/tests/indexes/multi/test_copy.py
+++ b/pandas/tests/indexes/multi/test_copy.py
@@ -79,10 +79,7 @@ def test_copy_method_kwargs(deep, kwarg, value):
names=["first", "second"],
)
idx_copy = idx.copy(**{kwarg: value, "deep": deep})
- if kwarg == "names":
- assert getattr(idx_copy, kwarg) == value
- else:
- assert [list(i) for i in getattr(idx_copy, kwarg)] == value
+ assert getattr(idx_copy, kwarg) == value
@pytest.mark.parametrize("deep", [True, False])
diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py
index aa2f37dad152c..26017dd976f73 100644
--- a/pandas/tests/indexes/multi/test_duplicates.py
+++ b/pandas/tests/indexes/multi/test_duplicates.py
@@ -68,6 +68,7 @@ def test_unique_level(idx, level):
mi = MultiIndex.from_arrays([[], []], names=["first", "second"])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
+ tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
diff --git a/pandas/tests/indexes/multi/test_integrity.py b/pandas/tests/indexes/multi/test_integrity.py
index f9ab0b3aceec4..2fdf6d1913a0f 100644
--- a/pandas/tests/indexes/multi/test_integrity.py
+++ b/pandas/tests/indexes/multi/test_integrity.py
@@ -137,7 +137,7 @@ def test_dims():
pass
-def take_invalid_kwargs():
+def test_take_invalid_kwargs():
vals = [["A", "B"], [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]]
idx = MultiIndex.from_product(vals, names=["str", "dt"])
indices = [1, 2]
| - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
Found a few wrong tests and parametrized the similar ones
| https://api.github.com/repos/pandas-dev/pandas/pulls/38978 | 2021-01-05T19:59:44Z | 2021-01-06T14:43:23Z | 2021-01-06T14:43:23Z | 2021-01-06T15:24:58Z |
BUG: MultiIndex.union dropping duplicates from result | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index a72f78f3ca30d..badbc88302d6b 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -843,7 +843,7 @@ Interval
Indexing
^^^^^^^^
-- Bug in :meth:`Index.union` dropping duplicate ``Index`` values when ``Index`` was not monotonic or ``sort`` was set to ``False`` (:issue:`36289`, :issue:`31326`, :issue:`40862`)
+- Bug in :meth:`Index.union` and :meth:`MultiIndex.union` dropping duplicate ``Index`` values when ``Index`` was not monotonic or ``sort`` was set to ``False`` (:issue:`36289`, :issue:`31326`, :issue:`40862`)
- Bug in :meth:`CategoricalIndex.get_indexer` failing to raise ``InvalidIndexError`` when non-unique (:issue:`38372`)
- Bug in inserting many new columns into a :class:`DataFrame` causing incorrect subsequent indexing behavior (:issue:`38380`)
- Bug in :meth:`DataFrame.__setitem__` raising ``ValueError`` when setting multiple values to duplicate columns (:issue:`15695`)
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index cbef4ed44dc06..d7e15bb2ad197 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -291,7 +291,7 @@ def item_from_zerodim(val: object) -> object:
@cython.wraparound(False)
@cython.boundscheck(False)
-def fast_unique_multiple(list arrays, sort: bool = True) -> list:
+def fast_unique_multiple(list arrays, sort: bool = True):
"""
Generate a list of unique values from a list of arrays.
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 1a3719233a1da..eb72355fce583 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -3574,14 +3574,20 @@ def equal_levels(self, other: MultiIndex) -> bool:
def _union(self, other, sort) -> MultiIndex:
other, result_names = self._convert_can_do_setop(other)
+ if (
+ any(-1 in code for code in self.codes)
+ and any(-1 in code for code in self.codes)
+ or self.has_duplicates
+ or other.has_duplicates
+ ):
+ # This is only necessary if both sides have nans or one has dups,
+ # fast_unique_multiple is faster
+ result = super()._union(other, sort)
+ else:
+ rvals = other._values.astype(object, copy=False)
+ result = lib.fast_unique_multiple([self._values, rvals], sort=sort)
- # We could get here with CategoricalIndex other
- rvals = other._values.astype(object, copy=False)
- uniq_tuples = lib.fast_unique_multiple([self._values, rvals], sort=sort)
-
- return MultiIndex.from_arrays(
- zip(*uniq_tuples), sortorder=0, names=result_names
- )
+ return MultiIndex.from_arrays(zip(*result), sortorder=0, names=result_names)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
return is_object_dtype(dtype)
diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py
index 0b59e832ce3a8..eb456bee39dbf 100644
--- a/pandas/tests/indexes/multi/test_setops.py
+++ b/pandas/tests/indexes/multi/test_setops.py
@@ -3,7 +3,9 @@
import pandas as pd
from pandas import (
+ CategoricalIndex,
Index,
+ IntervalIndex,
MultiIndex,
Series,
)
@@ -508,3 +510,26 @@ def test_intersection_with_missing_values_on_both_sides(nulls_fixture):
result = mi1.intersection(mi2)
expected = MultiIndex.from_arrays([[3.0, nulls_fixture], [1, 2]])
tm.assert_index_equal(result, expected)
+
+
+def test_union_nan_got_duplicated():
+ # GH#38977
+ mi1 = MultiIndex.from_arrays([[1.0, np.nan], [2, 3]])
+ mi2 = MultiIndex.from_arrays([[1.0, np.nan, 3.0], [2, 3, 4]])
+ result = mi1.union(mi2)
+ tm.assert_index_equal(result, mi2)
+
+
+def test_union_duplicates(index):
+ # GH#38977
+ if index.empty or isinstance(index, (IntervalIndex, CategoricalIndex)):
+ # No duplicates in empty indexes
+ return
+ values = index.unique().values.tolist()
+ mi1 = MultiIndex.from_arrays([values, [1] * len(values)])
+ mi2 = MultiIndex.from_arrays([[values[0]] + values, [1] * (len(values) + 1)])
+ result = mi1.union(mi2)
+ tm.assert_index_equal(result, mi2.sort_values())
+
+ result = mi2.union(mi1)
+ tm.assert_index_equal(result, mi2.sort_values())
diff --git a/pandas/tests/libs/test_lib.py b/pandas/tests/libs/test_lib.py
index 67bd5b309b634..5b7e90fe16d8f 100644
--- a/pandas/tests/libs/test_lib.py
+++ b/pandas/tests/libs/test_lib.py
@@ -2,7 +2,6 @@
import pytest
from pandas._libs import (
- Timestamp,
lib,
writers as libwriters,
)
@@ -43,11 +42,6 @@ def test_fast_unique_multiple_list_gen_sort(self):
out = lib.fast_unique_multiple_list_gen(gen, sort=False)
tm.assert_numpy_array_equal(np.array(out), expected)
- def test_fast_unique_multiple_unsortable_runtimewarning(self):
- arr = [np.array(["foo", Timestamp("2000")])]
- with tm.assert_produces_warning(RuntimeWarning):
- lib.fast_unique_multiple(arr, sort=None)
-
class TestIndexing:
def test_maybe_indices_to_slice_left_edge(self):
| - [x] xref #38745
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
This more or less sits on top of #36299. The current base-_union implementation works only for sorted indexes correctly if indexes containing duplicates. Hence I've only added tests for sorted indexes, | https://api.github.com/repos/pandas-dev/pandas/pulls/38977 | 2021-01-05T19:42:42Z | 2021-05-26T02:02:27Z | 2021-05-26T02:02:27Z | 2021-05-27T10:26:30Z |
DOC: remove is_lexsorted from MultiIndex docstring | diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 1da355c31987e..61b6b7ff19edc 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -216,7 +216,6 @@ class MultiIndex(Index):
set_codes
to_frame
to_flat_index
- is_lexsorted
sortlevel
droplevel
swaplevel
| `is_lexsorted` has been deprecated (https://github.com/pandas-dev/pandas/pull/38701)
- [x] closes https://github.com/pandas-dev/pandas/issues/38953
- [ ] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38975 | 2021-01-05T17:31:37Z | 2021-01-06T00:15:27Z | 2021-01-06T00:15:27Z | 2021-01-06T00:15:31Z |
TST: update pre-commit config to only exclude extension from bare pytest.raises check | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f5d8503041ccd..52f923c41cbd4 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -126,7 +126,7 @@ repos:
entry: python scripts/validate_unwanted_patterns.py --validation-type="bare_pytest_raises"
types: [python]
files: ^pandas/tests/
- exclude: ^pandas/tests/(computation|extension|io)/
+ exclude: ^pandas/tests/extension/
- id: inconsistent-namespace-usage
name: 'Check for inconsistent use of pandas namespace in tests'
entry: python scripts/check_for_inconsistent_pandas_namespace.py
| With #38920 I eliminated all instances of `pytest.raise` without `match=msg` in pandas/tests/computation and pandas/tests/io. #38799 was happening around the same time and missed that they were fixed. So this closes the loop and now only pandas/tests/extension needs to be excluded from the linting check.
I don't think the bare `pytest.raise`s in pandas/tests/extensions will be removed. They are in a pretty complex inheritance hierarchy and reused for many different types of errors and error messages. So I propose that this PR closes #30999.
- [x] closes #30999
- [ ] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38973 | 2021-01-05T16:56:16Z | 2021-01-05T18:47:08Z | 2021-01-05T18:47:07Z | 2021-01-05T19:10:57Z |
Backport PR #38803 on branch 1.2.x (BUG: avoid attribute error with pyarrow >=0.16.0 and <1.0.0) | diff --git a/ci/deps/actions-37-locale.yaml b/ci/deps/actions-37-locale.yaml
index 4f9918ca2f0c0..b18ce37d05ca0 100644
--- a/ci/deps/actions-37-locale.yaml
+++ b/ci/deps/actions-37-locale.yaml
@@ -30,7 +30,7 @@ dependencies:
- openpyxl
- pandas-gbq
- google-cloud-bigquery>=1.27.2 # GH 36436
- - pyarrow>=0.17
+ - pyarrow=0.17 # GH 38803
- pytables>=3.5.1
- scipy
- xarray=0.12.3
diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index e9602bbe1cee1..5695c817b5a3a 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -37,6 +37,7 @@ I/O
- Bumped minimum fastparquet version to 0.4.0 to avoid ``AttributeError`` from numba (:issue:`38344`)
- Bumped minimum pymysql version to 0.8.1 to avoid test failures (:issue:`38344`)
+- Fixed ``AttributeError`` with PyArrow versions [0.16.0, 1.0.0) (:issue:`38801`)
-
-
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 184fbc050036b..7d3806fe11bd2 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -29,13 +29,12 @@
except ImportError:
pa = None
else:
- # our min supported version of pyarrow, 0.15.1, does not have a compute
- # module
- try:
+ # PyArrow backed StringArrays are available starting at 1.0.0, but this
+ # file is imported from even if pyarrow is < 1.0.0, before pyarrow.compute
+ # and its compute functions existed. GH38801
+ if LooseVersion(pa.__version__) >= "1.0.0":
import pyarrow.compute as pc
- except ImportError:
- pass
- else:
+
ARROW_CMP_FUNCS = {
"eq": pc.equal,
"ne": pc.not_equal,
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 99e7c3061d670..a9357ef89de92 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -880,7 +880,7 @@ def test_timezone_aware_index(self, pa, timezone_aware_date_list):
# this use-case sets the resolution to 1 minute
check_round_trip(df, pa, check_dtype=False)
- @td.skip_if_no("pyarrow", min_version="0.17")
+ @td.skip_if_no("pyarrow", min_version="1.0.0")
def test_filter_row_groups(self, pa):
# https://github.com/pandas-dev/pandas/issues/26551
df = pd.DataFrame({"a": list(range(0, 3))})
| Backport PR #38803: BUG: avoid attribute error with pyarrow >=0.16.0 and <1.0.0 | https://api.github.com/repos/pandas-dev/pandas/pulls/38971 | 2021-01-05T13:05:41Z | 2021-01-05T14:12:45Z | 2021-01-05T14:12:45Z | 2021-01-05T14:12:45Z |
Backport PR #38841 on branch 1.2.x (Update conf.py to execute imports during pdf building) | diff --git a/doc/source/conf.py b/doc/source/conf.py
index 951a6d4043786..8ab1c8c2f3428 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -427,7 +427,7 @@
ipython_warning_is_error = False
-ipython_exec_lines = [
+ipython_execlines = [
"import numpy as np",
"import pandas as pd",
# This ensures correct rendering on system with console encoding != utf8
| Backport PR #38841: Update conf.py to execute imports during pdf building | https://api.github.com/repos/pandas-dev/pandas/pulls/38970 | 2021-01-05T12:58:45Z | 2021-01-05T14:12:29Z | 2021-01-05T14:12:29Z | 2021-01-05T14:12:30Z |
TST: add missing iloc label indexing tests | diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index bfc6b820c0fc0..24721a370241f 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -61,8 +61,8 @@ def test_iloc_getitem_list_int(self):
# the correct type
-class TestiLoc2:
- # TODO: better name, just separating out things that dont rely on base class
+class TestiLocBaseIndependent:
+ """Tests Independent Of Base Class"""
def test_is_scalar_access(self):
# GH#32085 index with duplicates doesnt matter for _is_scalar_access
@@ -262,12 +262,42 @@ def test_iloc_getitem_dups(self):
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
- # TODO: test something here?
- pass
+ df = DataFrame(
+ [
+ {"A": 1, "B": 2, "C": 3},
+ {"A": 100, "B": 200, "C": 300},
+ {"A": 1000, "B": 2000, "C": 3000},
+ ]
+ )
+
+ expected = DataFrame([{"A": 1, "B": 2, "C": 3}])
+ tm.assert_frame_equal(df.iloc[[0]], expected)
+
+ expected = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}])
+ tm.assert_frame_equal(df.iloc[[0, 1]], expected)
+
+ expected = DataFrame([{"B": 2, "C": 3}, {"B": 2000, "C": 3000}], index=[0, 2])
+ result = df.iloc[[0, 2], [1, 2]]
+ tm.assert_frame_equal(result, expected)
def test_iloc_getitem_bool(self):
- # TODO: test something here?
- pass
+ df = DataFrame(
+ [
+ {"A": 1, "B": 2, "C": 3},
+ {"A": 100, "B": 200, "C": 300},
+ {"A": 1000, "B": 2000, "C": 3000},
+ ]
+ )
+
+ expected = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}])
+ result = df.iloc[[True, True, False]]
+ tm.assert_frame_equal(result, expected)
+
+ expected = DataFrame(
+ [{"A": 1, "B": 2, "C": 3}, {"A": 1000, "B": 2000, "C": 3000}], index=[0, 2]
+ )
+ result = df.iloc[lambda x: x.index % 2 == 0]
+ tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index", [[True, False], [True, False, True, False]])
def test_iloc_getitem_bool_diff_len(self, index):
@@ -278,8 +308,27 @@ def test_iloc_getitem_bool_diff_len(self, index):
_ = s.iloc[index]
def test_iloc_getitem_slice(self):
- # TODO: test something here?
- pass
+ df = DataFrame(
+ [
+ {"A": 1, "B": 2, "C": 3},
+ {"A": 100, "B": 200, "C": 300},
+ {"A": 1000, "B": 2000, "C": 3000},
+ ]
+ )
+
+ expected = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}])
+ result = df.iloc[:2]
+ tm.assert_frame_equal(result, expected)
+
+ expected = DataFrame([{"A": 100, "B": 200}], index=[1])
+ result = df.iloc[1:2, 0:2]
+ tm.assert_frame_equal(result, expected)
+
+ expected = DataFrame(
+ [{"A": 1, "C": 3}, {"A": 100, "C": 300}, {"A": 1000, "C": 3000}]
+ )
+ result = df.iloc[:, lambda df: [0, 2]]
+ tm.assert_frame_equal(result, expected)
def test_iloc_getitem_slice_dups(self):
| - [x] closes #38967
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38967 | 2021-01-05T09:00:38Z | 2021-01-05T14:12:11Z | 2021-01-05T14:12:10Z | 2021-01-10T11:05:52Z |
BUG: Timedelta(td64_out_of_bounds) silently overflowing | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 974de36cc736d..886469837d184 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -198,14 +198,14 @@ Datetimelike
- Bug in :class:`DataFrame` and :class:`Series` constructors sometimes dropping nanoseconds from :class:`Timestamp` (resp. :class:`Timedelta`) ``data``, with ``dtype=datetime64[ns]`` (resp. ``timedelta64[ns]``) (:issue:`38032`)
- Bug in :meth:`DataFrame.first` and :meth:`Series.first` returning two months for offset one month when first day is last calendar day (:issue:`29623`)
- Bug in constructing a :class:`DataFrame` or :class:`Series` with mismatched ``datetime64`` data and ``timedelta64`` dtype, or vice-versa, failing to raise ``TypeError`` (:issue:`38575`, :issue:`38764`, :issue:`38792`)
-- Bug in constructing a :class:`Series` or :class:`DataFrame` with a ``datetime`` object out of bounds for ``datetime64[ns]`` dtype (:issue:`38792`)
+- Bug in constructing a :class:`Series` or :class:`DataFrame` with a ``datetime`` object out of bounds for ``datetime64[ns]`` dtype or a ``timedelta`` object ouf of bounds for ``timedelta64[ns]`` dtype (:issue:`38792`, :issue:`38965`)
- Bug in :meth:`DatetimeIndex.intersection`, :meth:`DatetimeIndex.symmetric_difference`, :meth:`PeriodIndex.intersection`, :meth:`PeriodIndex.symmetric_difference` always returning object-dtype when operating with :class:`CategoricalIndex` (:issue:`38741`)
- Bug in :meth:`Series.where` incorrectly casting ``datetime64`` values to ``int64`` (:issue:`37682`)
-
Timedelta
^^^^^^^^^
-
+- Bug in constructing :class:`Timedelta` from ``np.timedelta64`` objects with non-nanosecond units that are out of bounds for ``timedelta64[ns]`` (:issue:`38965`)
-
-
diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd
index b2524c6bc6c0d..026fa719d1cc1 100644
--- a/pandas/_libs/tslibs/np_datetime.pxd
+++ b/pandas/_libs/tslibs/np_datetime.pxd
@@ -42,6 +42,7 @@ cdef extern from "numpy/ndarraytypes.h":
NPY_FR_ps
NPY_FR_fs
NPY_FR_as
+ NPY_FR_GENERIC
cdef extern from "src/datetime/np_datetime.h":
ctypedef struct pandas_timedeltastruct:
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index bc7def817c973..f3bf45f681b1f 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -24,7 +24,7 @@ PyDateTime_IMPORT
cimport pandas._libs.tslibs.util as util
from pandas._libs.tslibs.base cimport ABCTimestamp
-from pandas._libs.tslibs.conversion cimport cast_from_unit
+from pandas._libs.tslibs.conversion cimport cast_from_unit, precision_from_unit
from pandas._libs.tslibs.nattype cimport (
NPY_NAT,
c_NaT as NaT,
@@ -32,7 +32,10 @@ from pandas._libs.tslibs.nattype cimport (
checknull_with_nat,
)
from pandas._libs.tslibs.np_datetime cimport (
+ NPY_DATETIMEUNIT,
cmp_scalar,
+ get_datetime64_unit,
+ get_timedelta64_value,
pandas_timedeltastruct,
td64_to_tdstruct,
)
@@ -156,7 +159,7 @@ cpdef int64_t delta_to_nanoseconds(delta) except? -1:
if isinstance(delta, _Timedelta):
delta = delta.value
if is_timedelta64_object(delta):
- return delta.astype("timedelta64[ns]").item()
+ return get_timedelta64_value(ensure_td64ns(delta))
if is_integer_object(delta):
return delta
if PyDelta_Check(delta):
@@ -169,6 +172,72 @@ cpdef int64_t delta_to_nanoseconds(delta) except? -1:
raise TypeError(type(delta))
+cdef str npy_unit_to_abbrev(NPY_DATETIMEUNIT unit):
+ if unit == NPY_DATETIMEUNIT.NPY_FR_ns or unit == NPY_DATETIMEUNIT.NPY_FR_GENERIC:
+ # generic -> default to nanoseconds
+ return "ns"
+ elif unit == NPY_DATETIMEUNIT.NPY_FR_us:
+ return "us"
+ elif unit == NPY_DATETIMEUNIT.NPY_FR_ms:
+ return "ms"
+ elif unit == NPY_DATETIMEUNIT.NPY_FR_s:
+ return "s"
+ elif unit == NPY_DATETIMEUNIT.NPY_FR_m:
+ return "m"
+ elif unit == NPY_DATETIMEUNIT.NPY_FR_h:
+ return "h"
+ elif unit == NPY_DATETIMEUNIT.NPY_FR_D:
+ return "D"
+ elif unit == NPY_DATETIMEUNIT.NPY_FR_W:
+ return "W"
+ elif unit == NPY_DATETIMEUNIT.NPY_FR_M:
+ return "M"
+ elif unit == NPY_DATETIMEUNIT.NPY_FR_Y:
+ return "Y"
+ else:
+ raise NotImplementedError(unit)
+
+
+@cython.overflowcheck(True)
+cdef object ensure_td64ns(object ts):
+ """
+ Overflow-safe implementation of td64.astype("m8[ns]")
+
+ Parameters
+ ----------
+ ts : np.timedelta64
+
+ Returns
+ -------
+ np.timedelta64[ns]
+ """
+ cdef:
+ NPY_DATETIMEUNIT td64_unit
+ int64_t td64_value, mult
+ str unitstr
+
+ td64_unit = get_datetime64_unit(ts)
+ if (
+ td64_unit != NPY_DATETIMEUNIT.NPY_FR_ns
+ and td64_unit != NPY_DATETIMEUNIT.NPY_FR_GENERIC
+ ):
+ unitstr = npy_unit_to_abbrev(td64_unit)
+
+ td64_value = get_timedelta64_value(ts)
+
+ mult = precision_from_unit(unitstr)[0]
+ try:
+ # NB: cython#1381 this cannot be *=
+ td64_value = td64_value * mult
+ except OverflowError as err:
+ from pandas._libs.tslibs.conversion import OutOfBoundsTimedelta
+ raise OutOfBoundsTimedelta(ts)
+
+ return np.timedelta64(td64_value, "ns")
+
+ return ts
+
+
cdef convert_to_timedelta64(object ts, str unit):
"""
Convert an incoming object to a timedelta64 if possible.
@@ -184,37 +253,37 @@ cdef convert_to_timedelta64(object ts, str unit):
Return an ns based int64
"""
if checknull_with_nat(ts):
- return np.timedelta64(NPY_NAT)
+ return np.timedelta64(NPY_NAT, "ns")
elif isinstance(ts, _Timedelta):
# already in the proper format
- ts = np.timedelta64(ts.value)
+ ts = np.timedelta64(ts.value, "ns")
elif is_datetime64_object(ts):
# only accept a NaT here
if ts.astype('int64') == NPY_NAT:
return np.timedelta64(NPY_NAT)
elif is_timedelta64_object(ts):
- ts = ts.astype(f"m8[{unit.lower()}]")
+ ts = ensure_td64ns(ts)
elif is_integer_object(ts):
if ts == NPY_NAT:
- return np.timedelta64(NPY_NAT)
+ return np.timedelta64(NPY_NAT, "ns")
else:
if unit in ['Y', 'M', 'W']:
ts = np.timedelta64(ts, unit)
else:
ts = cast_from_unit(ts, unit)
- ts = np.timedelta64(ts)
+ ts = np.timedelta64(ts, "ns")
elif is_float_object(ts):
if unit in ['Y', 'M', 'W']:
ts = np.timedelta64(int(ts), unit)
else:
ts = cast_from_unit(ts, unit)
- ts = np.timedelta64(ts)
+ ts = np.timedelta64(ts, "ns")
elif isinstance(ts, str):
if len(ts) > 0 and ts[0] == 'P':
ts = parse_iso_format_string(ts)
else:
ts = parse_timedelta_string(ts)
- ts = np.timedelta64(ts)
+ ts = np.timedelta64(ts, "ns")
elif is_tick_object(ts):
ts = np.timedelta64(ts.nanos, 'ns')
@@ -1196,7 +1265,7 @@ class Timedelta(_Timedelta):
elif is_timedelta64_object(value):
if unit is not None:
value = value.astype(f'timedelta64[{unit}]')
- value = value.astype('timedelta64[ns]')
+ value = ensure_td64ns(value)
elif is_tick_object(value):
value = np.timedelta64(value.nanos, 'ns')
elif is_integer_object(value) or is_float_object(value):
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 87f6e73e09d66..8065f85548f8c 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -28,6 +28,7 @@
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
+ OutOfBoundsTimedelta,
Period,
Timedelta,
Timestamp,
@@ -743,8 +744,12 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj,
val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
- val = Timedelta(val).value
- dtype = np.dtype("m8[ns]")
+ try:
+ val = Timedelta(val).value
+ except (OutOfBoundsTimedelta, OverflowError):
+ dtype = np.dtype(object)
+ else:
+ dtype = np.dtype("m8[ns]")
elif is_bool(val):
dtype = np.dtype(np.bool_)
@@ -1386,7 +1391,7 @@ def try_timedelta(v):
try:
td_values = to_timedelta(v)
- except ValueError:
+ except (ValueError, OverflowError):
return v.reshape(shape)
else:
return np.asarray(td_values).reshape(shape)
@@ -1618,8 +1623,16 @@ def construct_2d_arraylike_from_scalar(
value: Scalar, length: int, width: int, dtype: np.dtype, copy: bool
) -> np.ndarray:
+ shape = (length, width)
+
if dtype.kind in ["m", "M"]:
value = maybe_unbox_datetimelike(value, dtype)
+ elif dtype == object:
+ if isinstance(value, (np.timedelta64, np.datetime64)):
+ # calling np.array below would cast to pytimedelta/pydatetime
+ out = np.empty(shape, dtype=object)
+ out.fill(value)
+ return out
# Attempt to coerce to a numpy array
try:
@@ -1632,7 +1645,6 @@ def construct_2d_arraylike_from_scalar(
if arr.ndim != 0:
raise ValueError("DataFrame constructor not properly called!")
- shape = (length, width)
return np.full(shape, arr)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index f408a3ddde04e..889bd98d6d85a 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -10,7 +10,7 @@
import pytest
import pytz
-from pandas.compat.numpy import _np_version_under1p19, _np_version_under1p20
+from pandas.compat.numpy import _np_version_under1p19
from pandas.core.dtypes.common import is_integer_dtype
from pandas.core.dtypes.dtypes import DatetimeTZDtype, IntervalDtype, PeriodDtype
@@ -2371,16 +2371,10 @@ def test_from_timedelta_scalar_preserves_nanos(self, constructor):
def test_from_timestamp_scalar_preserves_nanos(self, constructor):
ts = Timestamp.now() + Timedelta(1)
- obj = Series(ts, index=range(1), dtype="M8[ns]")
+ obj = constructor(ts, dtype="M8[ns]")
assert get1(obj) == ts
- def test_from_timedelta64_scalar_object(self, constructor, request):
- if getattr(constructor, "func", None) is DataFrame and _np_version_under1p20:
- # getattr check means we only xfail when box is None
- mark = pytest.mark.xfail(
- reason="np.array(td64, dtype=object) converts to int"
- )
- request.node.add_marker(mark)
+ def test_from_timedelta64_scalar_object(self, constructor):
td = Timedelta(1)
td64 = td.to_timedelta64()
@@ -2407,8 +2401,20 @@ def test_from_scalar_datetimelike_mismatched(self, constructor, cls, request):
with pytest.raises(TypeError, match="Cannot cast"):
constructor(scalar, dtype=dtype)
- def test_from_out_of_bounds_datetime(self, constructor):
+ @pytest.mark.parametrize("cls", [datetime, np.datetime64])
+ def test_from_out_of_bounds_datetime(self, constructor, cls):
scalar = datetime(9999, 1, 1)
+ if cls is np.datetime64:
+ scalar = np.datetime64(scalar, "D")
+ result = constructor(scalar)
+
+ assert type(get1(result)) is cls
+
+ @pytest.mark.parametrize("cls", [timedelta, np.timedelta64])
+ def test_from_out_of_bounds_timedelta(self, constructor, cls):
+ scalar = datetime(9999, 1, 1) - datetime(1970, 1, 1)
+ if cls is np.timedelta64:
+ scalar = np.timedelta64(scalar, "D")
result = constructor(scalar)
- assert type(get1(result)) is datetime
+ assert type(get1(result)) is cls
diff --git a/pandas/tests/scalar/timedelta/test_constructors.py b/pandas/tests/scalar/timedelta/test_constructors.py
index 06bdb8a6cf0a2..64d5a5e9b3fff 100644
--- a/pandas/tests/scalar/timedelta/test_constructors.py
+++ b/pandas/tests/scalar/timedelta/test_constructors.py
@@ -3,6 +3,8 @@
import numpy as np
import pytest
+from pandas._libs.tslibs import OutOfBoundsTimedelta
+
from pandas import Timedelta, offsets, to_timedelta
@@ -197,6 +199,31 @@ def test_overflow_on_construction():
Timedelta(timedelta(days=13 * 19999))
+def test_construction_out_of_bounds_td64():
+ # TODO: parametrize over units just above/below the implementation bounds
+ # once GH#38964 is resolved
+
+ # Timedelta.max is just under 106752 days
+ td64 = np.timedelta64(106752, "D")
+ assert td64.astype("m8[ns]").view("i8") < 0 # i.e. naive astype will be wrong
+
+ msg = "106752 days"
+ with pytest.raises(OutOfBoundsTimedelta, match=msg):
+ Timedelta(td64)
+
+ # But just back in bounds and we are OK
+ assert Timedelta(td64 - 1) == td64 - 1
+
+ td64 *= -1
+ assert td64.astype("m8[ns]").view("i8") > 0 # i.e. naive astype will be wrong
+
+ with pytest.raises(OutOfBoundsTimedelta, match=msg):
+ Timedelta(td64)
+
+ # But just back in bounds and we are OK
+ assert Timedelta(td64 + 1) == td64 + 1
+
+
@pytest.mark.parametrize(
"fmt,exp",
[
diff --git a/pandas/util/_exceptions.py b/pandas/util/_exceptions.py
index 0723a37b1ba82..54bfcbddfc3dd 100644
--- a/pandas/util/_exceptions.py
+++ b/pandas/util/_exceptions.py
@@ -10,7 +10,7 @@ def rewrite_exception(old_name: str, new_name: str):
try:
yield
except Exception as err:
- msg = err.args[0]
+ msg = str(err.args[0])
msg = msg.replace(old_name, new_name)
args: Tuple[str, ...] = (msg,)
if len(err.args) > 1:
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38965 | 2021-01-05T05:58:46Z | 2021-01-06T18:35:30Z | 2021-01-06T18:35:30Z | 2021-01-06T18:42:34Z |
DEPR: Rolling.win_type returning freq & is_datetimelike | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 38b7a1d13c253..d35965ead1a1c 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -161,6 +161,8 @@ Deprecations
- Deprecated :meth:`MultiIndex.is_lexsorted` and :meth:`MultiIndex.lexsort_depth` as a public methods, users should use :meth:`MultiIndex.is_monotonic_increasing` instead (:issue:`32259`)
- Deprecated keyword ``try_cast`` in :meth:`Series.where`, :meth:`Series.mask`, :meth:`DataFrame.where`, :meth:`DataFrame.mask`; cast results manually if desired (:issue:`38836`)
- Deprecated comparison of :class:`Timestamp` object with ``datetime.date`` objects. Instead of e.g. ``ts <= mydate`` use ``ts <= pd.Timestamp(mydate)`` or ``ts.date() <= mydate`` (:issue:`36131`)
+- Deprecated :attr:`Rolling.win_type` returning ``"freq"`` (:issue:`38963`)
+- Deprecated :attr:`Rolling.is_datetimelike` (:issue:`38963`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index 99426c55da29b..594c5899209df 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -5,6 +5,7 @@
"""
import collections
from typing import List
+import warnings
from pandas._typing import final
@@ -27,7 +28,10 @@ def _shallow_copy(self, obj, **kwargs):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
- kwargs[attr] = getattr(self, attr)
+ # TODO: Remove once win_type deprecation is enforced
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", "win_type", FutureWarning)
+ kwargs[attr] = getattr(self, attr)
return self._constructor(obj, **kwargs)
@@ -59,7 +63,10 @@ def _gotitem(self, key, ndim, subset=None):
# we need to make a shallow copy of ourselves
# with the same groupby
- kwargs = {attr: getattr(self, attr) for attr in self._attributes}
+ # TODO: Remove once win_type deprecation is enforced
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", "win_type", FutureWarning)
+ kwargs = {attr: getattr(self, attr) for attr in self._attributes}
# Try to select from a DataFrame, falling back to a Series
try:
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 7ae1e61d426b9..a4612a4c8ed5d 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -110,7 +110,8 @@ def __init__(
self.window = window
self.min_periods = min_periods
self.center = center
- self.win_type = win_type
+ # TODO: Change this back to self.win_type once deprecation is enforced
+ self._win_type = win_type
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.method = method
self._win_freq_i8 = None
@@ -131,6 +132,27 @@ def __init__(
)
self.validate()
+ @property
+ def win_type(self):
+ if self._win_freq_i8 is not None:
+ warnings.warn(
+ "win_type will no longer return 'freq' in a future version. "
+ "Check the type of self.window instead.",
+ FutureWarning,
+ stacklevel=2,
+ )
+ return "freq"
+ return self._win_type
+
+ @property
+ def is_datetimelike(self):
+ warnings.warn(
+ "is_datetimelike is deprecated and will be removed in a future version.",
+ FutureWarning,
+ stacklevel=2,
+ )
+ return self._win_freq_i8 is not None
+
def validate(self) -> None:
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py
index 52c629f96b713..7d3c29dc60be0 100644
--- a/pandas/tests/window/test_api.py
+++ b/pandas/tests/window/test_api.py
@@ -319,3 +319,9 @@ def test_multiple_agg_funcs(func, window_size, expected_vals):
result = window.agg({"low": ["mean", "max"], "high": ["mean", "min"]})
tm.assert_frame_equal(result, expected)
+
+
+def test_is_datetimelike_deprecated():
+ s = Series(range(1)).rolling(1)
+ with tm.assert_produces_warning(FutureWarning):
+ assert not s.is_datetimelike
diff --git a/pandas/tests/window/test_win_type.py b/pandas/tests/window/test_win_type.py
index 4b1028e165c80..1cfba6f020018 100644
--- a/pandas/tests/window/test_win_type.py
+++ b/pandas/tests/window/test_win_type.py
@@ -4,7 +4,7 @@
from pandas.errors import UnsupportedFunctionCall
import pandas.util._test_decorators as td
-from pandas import DataFrame, Series, Timedelta, concat
+from pandas import DataFrame, Series, Timedelta, concat, date_range
import pandas._testing as tm
from pandas.api.indexers import BaseIndexer
@@ -137,6 +137,12 @@ def test_consistent_win_type_freq(arg):
s.rolling(arg, win_type="freq")
+def test_win_type_freq_return_deprecation():
+ freq_roll = Series(range(2), index=date_range("2020", periods=2)).rolling("2s")
+ with tm.assert_produces_warning(FutureWarning):
+ assert freq_roll.win_type == "freq"
+
+
@td.skip_if_no_scipy
def test_win_type_not_implemented():
class CustomIndexer(BaseIndexer):
| - [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
xref: https://github.com/pandas-dev/pandas/pull/38641#issuecomment-754118989
xref: https://github.com/pandas-dev/pandas/pull/38664/files#r551419130 | https://api.github.com/repos/pandas-dev/pandas/pulls/38963 | 2021-01-05T05:17:07Z | 2021-01-06T14:27:58Z | 2021-01-06T14:27:58Z | 2021-01-08T17:26:42Z |
REF: move functions out of _testing/__init__ | diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index 0591fc6afd633..c51ceb750c338 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -1,27 +1,12 @@
-import bz2
from collections import Counter
-from contextlib import contextmanager
from datetime import datetime
from functools import wraps
-import gzip
import operator
import os
import re
import string
-from typing import (
- Any,
- Callable,
- ContextManager,
- List,
- Optional,
- Sequence,
- Tuple,
- Type,
- Union,
- cast,
-)
+from typing import Callable, ContextManager, List, Type
import warnings
-import zipfile
import numpy as np
@@ -31,8 +16,7 @@
set_locale,
)
-from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
-from pandas.compat import get_lzma_file, import_lzma
+from pandas._typing import Dtype
from pandas.core.dtypes.common import (
is_datetime64_dtype,
@@ -55,6 +39,22 @@
Series,
bdate_range,
)
+from pandas._testing._io import ( # noqa:F401
+ close,
+ network,
+ round_trip_localpath,
+ round_trip_pathlib,
+ round_trip_pickle,
+ with_connectivity_check,
+ write_to_compressed,
+)
+from pandas._testing._random import ( # noqa:F401
+ randbool,
+ rands,
+ rands_array,
+ randu_array,
+)
+from pandas._testing._warnings import assert_produces_warning # noqa:F401
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
@@ -79,6 +79,7 @@
raise_assert_detail,
)
from pandas._testing.contexts import ( # noqa:F401
+ RNGContext,
decompress_file,
ensure_clean,
ensure_clean_dir,
@@ -89,13 +90,8 @@
)
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray, period_array
-from pandas.io.common import urlopen
-
-lzma = import_lzma()
-
_N = 30
_K = 4
-_RAISE_NETWORK_ERROR_DEFAULT = False
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
@@ -170,187 +166,6 @@ def reset_display_options():
pd.reset_option("^display.", silent=True)
-def round_trip_pickle(
- obj: Any, path: Optional[FilePathOrBuffer] = None
-) -> FrameOrSeries:
- """
- Pickle an object and then read it again.
-
- Parameters
- ----------
- obj : any object
- The object to pickle and then re-read.
- path : str, path object or file-like object, default None
- The path where the pickled object is written and then read.
-
- Returns
- -------
- pandas object
- The original object that was pickled and then re-read.
- """
- _path = path
- if _path is None:
- _path = f"__{rands(10)}__.pickle"
- with ensure_clean(_path) as temp_path:
- pd.to_pickle(obj, temp_path)
- return pd.read_pickle(temp_path)
-
-
-def round_trip_pathlib(writer, reader, path: Optional[str] = None):
- """
- Write an object to file specified by a pathlib.Path and read it back
-
- Parameters
- ----------
- writer : callable bound to pandas object
- IO writing function (e.g. DataFrame.to_csv )
- reader : callable
- IO reading function (e.g. pd.read_csv )
- path : str, default None
- The path where the object is written and then read.
-
- Returns
- -------
- pandas object
- The original object that was serialized and then re-read.
- """
- import pytest
-
- Path = pytest.importorskip("pathlib").Path
- if path is None:
- path = "___pathlib___"
- with ensure_clean(path) as path:
- writer(Path(path))
- obj = reader(Path(path))
- return obj
-
-
-def round_trip_localpath(writer, reader, path: Optional[str] = None):
- """
- Write an object to file specified by a py.path LocalPath and read it back.
-
- Parameters
- ----------
- writer : callable bound to pandas object
- IO writing function (e.g. DataFrame.to_csv )
- reader : callable
- IO reading function (e.g. pd.read_csv )
- path : str, default None
- The path where the object is written and then read.
-
- Returns
- -------
- pandas object
- The original object that was serialized and then re-read.
- """
- import pytest
-
- LocalPath = pytest.importorskip("py.path").local
- if path is None:
- path = "___localpath___"
- with ensure_clean(path) as path:
- writer(LocalPath(path))
- obj = reader(LocalPath(path))
- return obj
-
-
-def write_to_compressed(compression, path, data, dest="test"):
- """
- Write data to a compressed file.
-
- Parameters
- ----------
- compression : {'gzip', 'bz2', 'zip', 'xz'}
- The compression type to use.
- path : str
- The file path to write the data.
- data : str
- The data to write.
- dest : str, default "test"
- The destination file (for ZIP only)
-
- Raises
- ------
- ValueError : An invalid compression value was passed in.
- """
- args: Tuple[Any, ...] = (data,)
- mode = "wb"
- method = "write"
- compress_method: Callable
-
- if compression == "zip":
- compress_method = zipfile.ZipFile
- mode = "w"
- args = (dest, data)
- method = "writestr"
- elif compression == "gzip":
- compress_method = gzip.GzipFile
- elif compression == "bz2":
- compress_method = bz2.BZ2File
- elif compression == "xz":
- compress_method = get_lzma_file(lzma)
- else:
- raise ValueError(f"Unrecognized compression type: {compression}")
-
- with compress_method(path, mode=mode) as f:
- getattr(f, method)(*args)
-
-
-def randbool(size=(), p: float = 0.5):
- return np.random.rand(*size) <= p
-
-
-RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
-RANDU_CHARS = np.array(
- list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
- dtype=(np.unicode_, 1),
-)
-
-
-def rands_array(nchars, size, dtype="O"):
- """
- Generate an array of byte strings.
- """
- retval = (
- np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
- .view((np.str_, nchars))
- .reshape(size)
- )
- return retval.astype(dtype)
-
-
-def randu_array(nchars, size, dtype="O"):
- """
- Generate an array of unicode strings.
- """
- retval = (
- np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
- .view((np.unicode_, nchars))
- .reshape(size)
- )
- return retval.astype(dtype)
-
-
-def rands(nchars):
- """
- Generate one random byte string.
-
- See `rands_array` if you want to create an array of random strings.
-
- """
- return "".join(np.random.choice(RANDS_CHARS, nchars))
-
-
-def close(fignum=None):
- from matplotlib.pyplot import close as _close, get_fignums
-
- if fignum is None:
- for fignum in get_fignums():
- _close(fignum)
- else:
- _close(fignum)
-
-
# -----------------------------------------------------------------------------
# Comparators
@@ -935,449 +750,6 @@ def makeMissingDataframe(density=0.9, random_state=None):
return df
-def optional_args(decorator):
- """
- allows a decorator to take optional positional and keyword arguments.
- Assumes that taking a single, callable, positional argument means that
- it is decorating a function, i.e. something like this::
-
- @my_decorator
- def function(): pass
-
- Calls decorator with decorator(f, *args, **kwargs)
- """
-
- @wraps(decorator)
- def wrapper(*args, **kwargs):
- def dec(f):
- return decorator(f, *args, **kwargs)
-
- is_decorating = not kwargs and len(args) == 1 and callable(args[0])
- if is_decorating:
- f = args[0]
- # pandas\_testing.py:2331: error: Incompatible types in assignment
- # (expression has type "List[<nothing>]", variable has type
- # "Tuple[Any, ...]")
- args = [] # type: ignore[assignment]
- return dec(f)
- else:
- return dec
-
- return wrapper
-
-
-# skip tests on exceptions with this message
-_network_error_messages = (
- # 'urlopen error timed out',
- # 'timeout: timed out',
- # 'socket.timeout: timed out',
- "timed out",
- "Server Hangup",
- "HTTP Error 503: Service Unavailable",
- "502: Proxy Error",
- "HTTP Error 502: internal error",
- "HTTP Error 502",
- "HTTP Error 503",
- "HTTP Error 403",
- "HTTP Error 400",
- "Temporary failure in name resolution",
- "Name or service not known",
- "Connection refused",
- "certificate verify",
-)
-
-# or this e.errno/e.reason.errno
-_network_errno_vals = (
- 101, # Network is unreachable
- 111, # Connection refused
- 110, # Connection timed out
- 104, # Connection reset Error
- 54, # Connection reset by peer
- 60, # urllib.error.URLError: [Errno 60] Connection timed out
-)
-
-# Both of the above shouldn't mask real issues such as 404's
-# or refused connections (changed DNS).
-# But some tests (test_data yahoo) contact incredibly flakey
-# servers.
-
-# and conditionally raise on exception types in _get_default_network_errors
-
-
-def _get_default_network_errors():
- # Lazy import for http.client because it imports many things from the stdlib
- import http.client
-
- return (IOError, http.client.HTTPException, TimeoutError)
-
-
-def can_connect(url, error_classes=None):
- """
- Try to connect to the given url. True if succeeds, False if IOError
- raised
-
- Parameters
- ----------
- url : basestring
- The URL to try to connect to
-
- Returns
- -------
- connectable : bool
- Return True if no IOError (unable to connect) or URLError (bad url) was
- raised
- """
- if error_classes is None:
- error_classes = _get_default_network_errors()
-
- try:
- with urlopen(url):
- pass
- except error_classes:
- return False
- else:
- return True
-
-
-@optional_args
-def network(
- t,
- url="https://www.google.com",
- raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
- check_before_test=False,
- error_classes=None,
- skip_errnos=_network_errno_vals,
- _skip_on_messages=_network_error_messages,
-):
- """
- Label a test as requiring network connection and, if an error is
- encountered, only raise if it does not find a network connection.
-
- In comparison to ``network``, this assumes an added contract to your test:
- you must assert that, under normal conditions, your test will ONLY fail if
- it does not have network connectivity.
-
- You can call this in 3 ways: as a standard decorator, with keyword
- arguments, or with a positional argument that is the url to check.
-
- Parameters
- ----------
- t : callable
- The test requiring network connectivity.
- url : path
- The url to test via ``pandas.io.common.urlopen`` to check
- for connectivity. Defaults to 'https://www.google.com'.
- raise_on_error : bool
- If True, never catches errors.
- check_before_test : bool
- If True, checks connectivity before running the test case.
- error_classes : tuple or Exception
- error classes to ignore. If not in ``error_classes``, raises the error.
- defaults to IOError. Be careful about changing the error classes here.
- skip_errnos : iterable of int
- Any exception that has .errno or .reason.erno set to one
- of these values will be skipped with an appropriate
- message.
- _skip_on_messages: iterable of string
- any exception e for which one of the strings is
- a substring of str(e) will be skipped with an appropriate
- message. Intended to suppress errors where an errno isn't available.
-
- Notes
- -----
- * ``raise_on_error`` supersedes ``check_before_test``
-
- Returns
- -------
- t : callable
- The decorated test ``t``, with checks for connectivity errors.
-
- Example
- -------
-
- Tests decorated with @network will fail if it's possible to make a network
- connection to another URL (defaults to google.com)::
-
- >>> from pandas._testing import network
- >>> from pandas.io.common import urlopen
- >>> @network
- ... def test_network():
- ... with urlopen("rabbit://bonanza.com"):
- ... pass
- Traceback
- ...
- URLError: <urlopen error unknown url type: rabit>
-
- You can specify alternative URLs::
-
- >>> @network("https://www.yahoo.com")
- ... def test_something_with_yahoo():
- ... raise IOError("Failure Message")
- >>> test_something_with_yahoo()
- Traceback (most recent call last):
- ...
- IOError: Failure Message
-
- If you set check_before_test, it will check the url first and not run the
- test on failure::
-
- >>> @network("failing://url.blaher", check_before_test=True)
- ... def test_something():
- ... print("I ran!")
- ... raise ValueError("Failure")
- >>> test_something()
- Traceback (most recent call last):
- ...
-
- Errors not related to networking will always be raised.
- """
- from pytest import skip
-
- if error_classes is None:
- error_classes = _get_default_network_errors()
-
- t.network = True
-
- @wraps(t)
- def wrapper(*args, **kwargs):
- if (
- check_before_test
- and not raise_on_error
- and not can_connect(url, error_classes)
- ):
- skip()
- try:
- return t(*args, **kwargs)
- except Exception as err:
- errno = getattr(err, "errno", None)
- if not errno and hasattr(errno, "reason"):
- # pandas\_testing.py:2521: error: "Exception" has no attribute
- # "reason"
- errno = getattr(err.reason, "errno", None) # type: ignore[attr-defined]
-
- if errno in skip_errnos:
- skip(f"Skipping test due to known errno and error {err}")
-
- e_str = str(err)
-
- if any(m.lower() in e_str.lower() for m in _skip_on_messages):
- skip(
- f"Skipping test because exception message is known and error {err}"
- )
-
- if not isinstance(err, error_classes):
- raise
-
- if raise_on_error or can_connect(url, error_classes):
- raise
- else:
- skip(f"Skipping test due to lack of connectivity and error {err}")
-
- return wrapper
-
-
-with_connectivity_check = network
-
-
-@contextmanager
-def assert_produces_warning(
- expected_warning: Optional[Union[Type[Warning], bool]] = Warning,
- filter_level="always",
- check_stacklevel: bool = True,
- raise_on_extra_warnings: bool = True,
- match: Optional[str] = None,
-):
- """
- Context manager for running code expected to either raise a specific
- warning, or not raise any warnings. Verifies that the code raises the
- expected warning, and that it does not raise any other unexpected
- warnings. It is basically a wrapper around ``warnings.catch_warnings``.
-
- Parameters
- ----------
- expected_warning : {Warning, False, None}, default Warning
- The type of Exception raised. ``exception.Warning`` is the base
- class for all warnings. To check that no warning is returned,
- specify ``False`` or ``None``.
- filter_level : str or None, default "always"
- Specifies whether warnings are ignored, displayed, or turned
- into errors.
- Valid values are:
-
- * "error" - turns matching warnings into exceptions
- * "ignore" - discard the warning
- * "always" - always emit a warning
- * "default" - print the warning the first time it is generated
- from each location
- * "module" - print the warning the first time it is generated
- from each module
- * "once" - print the warning the first time it is generated
-
- check_stacklevel : bool, default True
- If True, displays the line that called the function containing
- the warning to show were the function is called. Otherwise, the
- line that implements the function is displayed.
- raise_on_extra_warnings : bool, default True
- Whether extra warnings not of the type `expected_warning` should
- cause the test to fail.
- match : str, optional
- Match warning message.
-
- Examples
- --------
- >>> import warnings
- >>> with assert_produces_warning():
- ... warnings.warn(UserWarning())
- ...
- >>> with assert_produces_warning(False):
- ... warnings.warn(RuntimeWarning())
- ...
- Traceback (most recent call last):
- ...
- AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
- >>> with assert_produces_warning(UserWarning):
- ... warnings.warn(RuntimeWarning())
- Traceback (most recent call last):
- ...
- AssertionError: Did not see expected warning of class 'UserWarning'.
-
- ..warn:: This is *not* thread-safe.
- """
- __tracebackhide__ = True
-
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter(filter_level)
- yield w
-
- if expected_warning:
- expected_warning = cast(Type[Warning], expected_warning)
- _assert_caught_expected_warning(
- caught_warnings=w,
- expected_warning=expected_warning,
- match=match,
- check_stacklevel=check_stacklevel,
- )
-
- if raise_on_extra_warnings:
- _assert_caught_no_extra_warnings(
- caught_warnings=w,
- expected_warning=expected_warning,
- )
-
-
-def _assert_caught_expected_warning(
- *,
- caught_warnings: Sequence[warnings.WarningMessage],
- expected_warning: Type[Warning],
- match: Optional[str],
- check_stacklevel: bool,
-) -> None:
- """Assert that there was the expected warning among the caught warnings."""
- saw_warning = False
- matched_message = False
-
- for actual_warning in caught_warnings:
- if issubclass(actual_warning.category, expected_warning):
- saw_warning = True
-
- if check_stacklevel and issubclass(
- actual_warning.category, (FutureWarning, DeprecationWarning)
- ):
- _assert_raised_with_correct_stacklevel(actual_warning)
-
- if match is not None and re.search(match, str(actual_warning.message)):
- matched_message = True
-
- if not saw_warning:
- raise AssertionError(
- f"Did not see expected warning of class "
- f"{repr(expected_warning.__name__)}"
- )
-
- if match and not matched_message:
- raise AssertionError(
- f"Did not see warning {repr(expected_warning.__name__)} "
- f"matching {match}"
- )
-
-
-def _assert_caught_no_extra_warnings(
- *,
- caught_warnings: Sequence[warnings.WarningMessage],
- expected_warning: Optional[Union[Type[Warning], bool]],
-) -> None:
- """Assert that no extra warnings apart from the expected ones are caught."""
- extra_warnings = []
-
- for actual_warning in caught_warnings:
- if _is_unexpected_warning(actual_warning, expected_warning):
- extra_warnings.append(
- (
- actual_warning.category.__name__,
- actual_warning.message,
- actual_warning.filename,
- actual_warning.lineno,
- )
- )
-
- if extra_warnings:
- raise AssertionError(f"Caused unexpected warning(s): {repr(extra_warnings)}")
-
-
-def _is_unexpected_warning(
- actual_warning: warnings.WarningMessage,
- expected_warning: Optional[Union[Type[Warning], bool]],
-) -> bool:
- """Check if the actual warning issued is unexpected."""
- if actual_warning and not expected_warning:
- return True
- expected_warning = cast(Type[Warning], expected_warning)
- return bool(not issubclass(actual_warning.category, expected_warning))
-
-
-def _assert_raised_with_correct_stacklevel(
- actual_warning: warnings.WarningMessage,
-) -> None:
- from inspect import getframeinfo, stack
-
- caller = getframeinfo(stack()[4][0])
- msg = (
- "Warning not set with correct stacklevel. "
- f"File where warning is raised: {actual_warning.filename} != "
- f"{caller.filename}. Warning message: {actual_warning.message}"
- )
- assert actual_warning.filename == caller.filename, msg
-
-
-class RNGContext:
- """
- Context manager to set the numpy random number generator speed. Returns
- to the original value upon exiting the context manager.
-
- Parameters
- ----------
- seed : int
- Seed for numpy.random.seed
-
- Examples
- --------
- with RNGContext(42):
- np.random.randn()
- """
-
- def __init__(self, seed):
- self.seed = seed
-
- def __enter__(self):
-
- self.start_state = np.random.get_state()
- np.random.seed(self.seed)
-
- def __exit__(self, exc_type, exc_value, traceback):
-
- np.random.set_state(self.start_state)
-
-
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
diff --git a/pandas/_testing/_io.py b/pandas/_testing/_io.py
new file mode 100644
index 0000000000000..5f27b016b68a2
--- /dev/null
+++ b/pandas/_testing/_io.py
@@ -0,0 +1,405 @@
+import bz2
+from functools import wraps
+import gzip
+from typing import Any, Callable, Optional, Tuple
+import zipfile
+
+from pandas._typing import FilePathOrBuffer, FrameOrSeries
+from pandas.compat import get_lzma_file, import_lzma
+
+import pandas as pd
+from pandas._testing._random import rands
+from pandas._testing.contexts import ensure_clean
+
+from pandas.io.common import urlopen
+
+_RAISE_NETWORK_ERROR_DEFAULT = False
+
+lzma = import_lzma()
+
+# skip tests on exceptions with these messages
+_network_error_messages = (
+ # 'urlopen error timed out',
+ # 'timeout: timed out',
+ # 'socket.timeout: timed out',
+ "timed out",
+ "Server Hangup",
+ "HTTP Error 503: Service Unavailable",
+ "502: Proxy Error",
+ "HTTP Error 502: internal error",
+ "HTTP Error 502",
+ "HTTP Error 503",
+ "HTTP Error 403",
+ "HTTP Error 400",
+ "Temporary failure in name resolution",
+ "Name or service not known",
+ "Connection refused",
+ "certificate verify",
+)
+
+# or this e.errno/e.reason.errno
+_network_errno_vals = (
+ 101, # Network is unreachable
+ 111, # Connection refused
+ 110, # Connection timed out
+ 104, # Connection reset Error
+ 54, # Connection reset by peer
+ 60, # urllib.error.URLError: [Errno 60] Connection timed out
+)
+
+# Both of the above shouldn't mask real issues such as 404's
+# or refused connections (changed DNS).
+# But some tests (test_data yahoo) contact incredibly flakey
+# servers.
+
+# and conditionally raise on exception types in _get_default_network_errors
+
+
+def _get_default_network_errors():
+ # Lazy import for http.client because it imports many things from the stdlib
+ import http.client
+
+ return (IOError, http.client.HTTPException, TimeoutError)
+
+
+def optional_args(decorator):
+ """
+ allows a decorator to take optional positional and keyword arguments.
+ Assumes that taking a single, callable, positional argument means that
+ it is decorating a function, i.e. something like this::
+
+ @my_decorator
+ def function(): pass
+
+ Calls decorator with decorator(f, *args, **kwargs)
+ """
+
+ @wraps(decorator)
+ def wrapper(*args, **kwargs):
+ def dec(f):
+ return decorator(f, *args, **kwargs)
+
+ is_decorating = not kwargs and len(args) == 1 and callable(args[0])
+ if is_decorating:
+ f = args[0]
+ # pandas\_testing.py:2331: error: Incompatible types in assignment
+ # (expression has type "List[<nothing>]", variable has type
+ # "Tuple[Any, ...]")
+ args = [] # type: ignore[assignment]
+ return dec(f)
+ else:
+ return dec
+
+ return wrapper
+
+
+@optional_args
+def network(
+ t,
+ url="https://www.google.com",
+ raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
+ check_before_test=False,
+ error_classes=None,
+ skip_errnos=_network_errno_vals,
+ _skip_on_messages=_network_error_messages,
+):
+ """
+ Label a test as requiring network connection and, if an error is
+ encountered, only raise if it does not find a network connection.
+
+ In comparison to ``network``, this assumes an added contract to your test:
+ you must assert that, under normal conditions, your test will ONLY fail if
+ it does not have network connectivity.
+
+ You can call this in 3 ways: as a standard decorator, with keyword
+ arguments, or with a positional argument that is the url to check.
+
+ Parameters
+ ----------
+ t : callable
+ The test requiring network connectivity.
+ url : path
+ The url to test via ``pandas.io.common.urlopen`` to check
+ for connectivity. Defaults to 'https://www.google.com'.
+ raise_on_error : bool
+ If True, never catches errors.
+ check_before_test : bool
+ If True, checks connectivity before running the test case.
+ error_classes : tuple or Exception
+ error classes to ignore. If not in ``error_classes``, raises the error.
+ defaults to IOError. Be careful about changing the error classes here.
+ skip_errnos : iterable of int
+ Any exception that has .errno or .reason.erno set to one
+ of these values will be skipped with an appropriate
+ message.
+ _skip_on_messages: iterable of string
+ any exception e for which one of the strings is
+ a substring of str(e) will be skipped with an appropriate
+ message. Intended to suppress errors where an errno isn't available.
+
+ Notes
+ -----
+ * ``raise_on_error`` supersedes ``check_before_test``
+
+ Returns
+ -------
+ t : callable
+ The decorated test ``t``, with checks for connectivity errors.
+
+ Example
+ -------
+
+ Tests decorated with @network will fail if it's possible to make a network
+ connection to another URL (defaults to google.com)::
+
+ >>> from pandas._testing import network
+ >>> from pandas.io.common import urlopen
+ >>> @network
+ ... def test_network():
+ ... with urlopen("rabbit://bonanza.com"):
+ ... pass
+ Traceback
+ ...
+ URLError: <urlopen error unknown url type: rabit>
+
+ You can specify alternative URLs::
+
+ >>> @network("https://www.yahoo.com")
+ ... def test_something_with_yahoo():
+ ... raise IOError("Failure Message")
+ >>> test_something_with_yahoo()
+ Traceback (most recent call last):
+ ...
+ IOError: Failure Message
+
+ If you set check_before_test, it will check the url first and not run the
+ test on failure::
+
+ >>> @network("failing://url.blaher", check_before_test=True)
+ ... def test_something():
+ ... print("I ran!")
+ ... raise ValueError("Failure")
+ >>> test_something()
+ Traceback (most recent call last):
+ ...
+
+ Errors not related to networking will always be raised.
+ """
+ from pytest import skip
+
+ if error_classes is None:
+ error_classes = _get_default_network_errors()
+
+ t.network = True
+
+ @wraps(t)
+ def wrapper(*args, **kwargs):
+ if (
+ check_before_test
+ and not raise_on_error
+ and not can_connect(url, error_classes)
+ ):
+ skip()
+ try:
+ return t(*args, **kwargs)
+ except Exception as err:
+ errno = getattr(err, "errno", None)
+ if not errno and hasattr(errno, "reason"):
+ # pandas\_testing.py:2521: error: "Exception" has no attribute
+ # "reason"
+ errno = getattr(err.reason, "errno", None) # type: ignore[attr-defined]
+
+ if errno in skip_errnos:
+ skip(f"Skipping test due to known errno and error {err}")
+
+ e_str = str(err)
+
+ if any(m.lower() in e_str.lower() for m in _skip_on_messages):
+ skip(
+ f"Skipping test because exception message is known and error {err}"
+ )
+
+ if not isinstance(err, error_classes):
+ raise
+
+ if raise_on_error or can_connect(url, error_classes):
+ raise
+ else:
+ skip(f"Skipping test due to lack of connectivity and error {err}")
+
+ return wrapper
+
+
+with_connectivity_check = network
+
+
+def can_connect(url, error_classes=None):
+ """
+ Try to connect to the given url. True if succeeds, False if IOError
+ raised
+
+ Parameters
+ ----------
+ url : basestring
+ The URL to try to connect to
+
+ Returns
+ -------
+ connectable : bool
+ Return True if no IOError (unable to connect) or URLError (bad url) was
+ raised
+ """
+ if error_classes is None:
+ error_classes = _get_default_network_errors()
+
+ try:
+ with urlopen(url):
+ pass
+ except error_classes:
+ return False
+ else:
+ return True
+
+
+# ------------------------------------------------------------------
+# File-IO
+
+
+def round_trip_pickle(
+ obj: Any, path: Optional[FilePathOrBuffer] = None
+) -> FrameOrSeries:
+ """
+ Pickle an object and then read it again.
+
+ Parameters
+ ----------
+ obj : any object
+ The object to pickle and then re-read.
+ path : str, path object or file-like object, default None
+ The path where the pickled object is written and then read.
+
+ Returns
+ -------
+ pandas object
+ The original object that was pickled and then re-read.
+ """
+ _path = path
+ if _path is None:
+ _path = f"__{rands(10)}__.pickle"
+ with ensure_clean(_path) as temp_path:
+ pd.to_pickle(obj, temp_path)
+ return pd.read_pickle(temp_path)
+
+
+def round_trip_pathlib(writer, reader, path: Optional[str] = None):
+ """
+ Write an object to file specified by a pathlib.Path and read it back
+
+ Parameters
+ ----------
+ writer : callable bound to pandas object
+ IO writing function (e.g. DataFrame.to_csv )
+ reader : callable
+ IO reading function (e.g. pd.read_csv )
+ path : str, default None
+ The path where the object is written and then read.
+
+ Returns
+ -------
+ pandas object
+ The original object that was serialized and then re-read.
+ """
+ import pytest
+
+ Path = pytest.importorskip("pathlib").Path
+ if path is None:
+ path = "___pathlib___"
+ with ensure_clean(path) as path:
+ writer(Path(path))
+ obj = reader(Path(path))
+ return obj
+
+
+def round_trip_localpath(writer, reader, path: Optional[str] = None):
+ """
+ Write an object to file specified by a py.path LocalPath and read it back.
+
+ Parameters
+ ----------
+ writer : callable bound to pandas object
+ IO writing function (e.g. DataFrame.to_csv )
+ reader : callable
+ IO reading function (e.g. pd.read_csv )
+ path : str, default None
+ The path where the object is written and then read.
+
+ Returns
+ -------
+ pandas object
+ The original object that was serialized and then re-read.
+ """
+ import pytest
+
+ LocalPath = pytest.importorskip("py.path").local
+ if path is None:
+ path = "___localpath___"
+ with ensure_clean(path) as path:
+ writer(LocalPath(path))
+ obj = reader(LocalPath(path))
+ return obj
+
+
+def write_to_compressed(compression, path, data, dest="test"):
+ """
+ Write data to a compressed file.
+
+ Parameters
+ ----------
+ compression : {'gzip', 'bz2', 'zip', 'xz'}
+ The compression type to use.
+ path : str
+ The file path to write the data.
+ data : str
+ The data to write.
+ dest : str, default "test"
+ The destination file (for ZIP only)
+
+ Raises
+ ------
+ ValueError : An invalid compression value was passed in.
+ """
+ args: Tuple[Any, ...] = (data,)
+ mode = "wb"
+ method = "write"
+ compress_method: Callable
+
+ if compression == "zip":
+ compress_method = zipfile.ZipFile
+ mode = "w"
+ args = (dest, data)
+ method = "writestr"
+ elif compression == "gzip":
+ compress_method = gzip.GzipFile
+ elif compression == "bz2":
+ compress_method = bz2.BZ2File
+ elif compression == "xz":
+ compress_method = get_lzma_file(lzma)
+ else:
+ raise ValueError(f"Unrecognized compression type: {compression}")
+
+ with compress_method(path, mode=mode) as f:
+ getattr(f, method)(*args)
+
+
+# ------------------------------------------------------------------
+# Plotting
+
+
+def close(fignum=None):
+ from matplotlib.pyplot import close as _close, get_fignums
+
+ if fignum is None:
+ for fignum in get_fignums():
+ _close(fignum)
+ else:
+ _close(fignum)
diff --git a/pandas/_testing/_random.py b/pandas/_testing/_random.py
new file mode 100644
index 0000000000000..a646d7639a4e6
--- /dev/null
+++ b/pandas/_testing/_random.py
@@ -0,0 +1,48 @@
+import string
+
+import numpy as np
+
+
+def randbool(size=(), p: float = 0.5):
+ return np.random.rand(*size) <= p
+
+
+RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
+RANDU_CHARS = np.array(
+ list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
+ dtype=(np.unicode_, 1),
+)
+
+
+def rands_array(nchars, size, dtype="O"):
+ """
+ Generate an array of byte strings.
+ """
+ retval = (
+ np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
+ .view((np.str_, nchars))
+ .reshape(size)
+ )
+ return retval.astype(dtype)
+
+
+def randu_array(nchars, size, dtype="O"):
+ """
+ Generate an array of unicode strings.
+ """
+ retval = (
+ np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
+ .view((np.unicode_, nchars))
+ .reshape(size)
+ )
+ return retval.astype(dtype)
+
+
+def rands(nchars):
+ """
+ Generate one random byte string.
+
+ See `rands_array` if you want to create an array of random strings.
+
+ """
+ return "".join(np.random.choice(RANDS_CHARS, nchars))
diff --git a/pandas/_testing/_warnings.py b/pandas/_testing/_warnings.py
new file mode 100644
index 0000000000000..6429f74637f01
--- /dev/null
+++ b/pandas/_testing/_warnings.py
@@ -0,0 +1,174 @@
+from contextlib import contextmanager
+import re
+from typing import Optional, Sequence, Type, Union, cast
+import warnings
+
+
+@contextmanager
+def assert_produces_warning(
+ expected_warning: Optional[Union[Type[Warning], bool]] = Warning,
+ filter_level="always",
+ check_stacklevel: bool = True,
+ raise_on_extra_warnings: bool = True,
+ match: Optional[str] = None,
+):
+ """
+ Context manager for running code expected to either raise a specific
+ warning, or not raise any warnings. Verifies that the code raises the
+ expected warning, and that it does not raise any other unexpected
+ warnings. It is basically a wrapper around ``warnings.catch_warnings``.
+
+ Parameters
+ ----------
+ expected_warning : {Warning, False, None}, default Warning
+ The type of Exception raised. ``exception.Warning`` is the base
+ class for all warnings. To check that no warning is returned,
+ specify ``False`` or ``None``.
+ filter_level : str or None, default "always"
+ Specifies whether warnings are ignored, displayed, or turned
+ into errors.
+ Valid values are:
+
+ * "error" - turns matching warnings into exceptions
+ * "ignore" - discard the warning
+ * "always" - always emit a warning
+ * "default" - print the warning the first time it is generated
+ from each location
+ * "module" - print the warning the first time it is generated
+ from each module
+ * "once" - print the warning the first time it is generated
+
+ check_stacklevel : bool, default True
+ If True, displays the line that called the function containing
+ the warning to show were the function is called. Otherwise, the
+ line that implements the function is displayed.
+ raise_on_extra_warnings : bool, default True
+ Whether extra warnings not of the type `expected_warning` should
+ cause the test to fail.
+ match : str, optional
+ Match warning message.
+
+ Examples
+ --------
+ >>> import warnings
+ >>> with assert_produces_warning():
+ ... warnings.warn(UserWarning())
+ ...
+ >>> with assert_produces_warning(False):
+ ... warnings.warn(RuntimeWarning())
+ ...
+ Traceback (most recent call last):
+ ...
+ AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
+ >>> with assert_produces_warning(UserWarning):
+ ... warnings.warn(RuntimeWarning())
+ Traceback (most recent call last):
+ ...
+ AssertionError: Did not see expected warning of class 'UserWarning'.
+
+ ..warn:: This is *not* thread-safe.
+ """
+ __tracebackhide__ = True
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter(filter_level)
+ yield w
+
+ if expected_warning:
+ expected_warning = cast(Type[Warning], expected_warning)
+ _assert_caught_expected_warning(
+ caught_warnings=w,
+ expected_warning=expected_warning,
+ match=match,
+ check_stacklevel=check_stacklevel,
+ )
+
+ if raise_on_extra_warnings:
+ _assert_caught_no_extra_warnings(
+ caught_warnings=w,
+ expected_warning=expected_warning,
+ )
+
+
+def _assert_caught_expected_warning(
+ *,
+ caught_warnings: Sequence[warnings.WarningMessage],
+ expected_warning: Type[Warning],
+ match: Optional[str],
+ check_stacklevel: bool,
+) -> None:
+ """Assert that there was the expected warning among the caught warnings."""
+ saw_warning = False
+ matched_message = False
+
+ for actual_warning in caught_warnings:
+ if issubclass(actual_warning.category, expected_warning):
+ saw_warning = True
+
+ if check_stacklevel and issubclass(
+ actual_warning.category, (FutureWarning, DeprecationWarning)
+ ):
+ _assert_raised_with_correct_stacklevel(actual_warning)
+
+ if match is not None and re.search(match, str(actual_warning.message)):
+ matched_message = True
+
+ if not saw_warning:
+ raise AssertionError(
+ f"Did not see expected warning of class "
+ f"{repr(expected_warning.__name__)}"
+ )
+
+ if match and not matched_message:
+ raise AssertionError(
+ f"Did not see warning {repr(expected_warning.__name__)} "
+ f"matching {match}"
+ )
+
+
+def _assert_caught_no_extra_warnings(
+ *,
+ caught_warnings: Sequence[warnings.WarningMessage],
+ expected_warning: Optional[Union[Type[Warning], bool]],
+) -> None:
+ """Assert that no extra warnings apart from the expected ones are caught."""
+ extra_warnings = []
+
+ for actual_warning in caught_warnings:
+ if _is_unexpected_warning(actual_warning, expected_warning):
+ extra_warnings.append(
+ (
+ actual_warning.category.__name__,
+ actual_warning.message,
+ actual_warning.filename,
+ actual_warning.lineno,
+ )
+ )
+
+ if extra_warnings:
+ raise AssertionError(f"Caused unexpected warning(s): {repr(extra_warnings)}")
+
+
+def _is_unexpected_warning(
+ actual_warning: warnings.WarningMessage,
+ expected_warning: Optional[Union[Type[Warning], bool]],
+) -> bool:
+ """Check if the actual warning issued is unexpected."""
+ if actual_warning and not expected_warning:
+ return True
+ expected_warning = cast(Type[Warning], expected_warning)
+ return bool(not issubclass(actual_warning.category, expected_warning))
+
+
+def _assert_raised_with_correct_stacklevel(
+ actual_warning: warnings.WarningMessage,
+) -> None:
+ from inspect import getframeinfo, stack
+
+ caller = getframeinfo(stack()[4][0])
+ msg = (
+ "Warning not set with correct stacklevel. "
+ f"File where warning is raised: {actual_warning.filename} != "
+ f"{caller.filename}. Warning message: {actual_warning.message}"
+ )
+ assert actual_warning.filename == caller.filename, msg
diff --git a/pandas/_testing/contexts.py b/pandas/_testing/contexts.py
index d6a4b47571653..d72dc8c3af104 100644
--- a/pandas/_testing/contexts.py
+++ b/pandas/_testing/contexts.py
@@ -3,6 +3,8 @@
from shutil import rmtree
import tempfile
+import numpy as np
+
from pandas.io.common import get_handle
@@ -214,3 +216,32 @@ def use_numexpr(use, min_elements=None):
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
+
+
+class RNGContext:
+ """
+ Context manager to set the numpy random number generator speed. Returns
+ to the original value upon exiting the context manager.
+
+ Parameters
+ ----------
+ seed : int
+ Seed for numpy.random.seed
+
+ Examples
+ --------
+ with RNGContext(42):
+ np.random.randn()
+ """
+
+ def __init__(self, seed):
+ self.seed = seed
+
+ def __enter__(self):
+
+ self.start_state = np.random.get_state()
+ np.random.seed(self.seed)
+
+ def __exit__(self, exc_type, exc_value, traceback):
+
+ np.random.set_state(self.start_state)
| https://api.github.com/repos/pandas-dev/pandas/pulls/38961 | 2021-01-04T23:52:11Z | 2021-01-05T02:18:14Z | 2021-01-05T02:18:14Z | 2021-01-05T02:26:32Z | |
TST: strict xfail | diff --git a/pandas/conftest.py b/pandas/conftest.py
index 2862f7c957abc..bf5e632374b59 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -473,7 +473,7 @@ def index_with_missing(request):
Fixture for indices with missing values
"""
if request.param in ["int", "uint", "range", "empty", "repeats"]:
- pytest.xfail("missing values not supported")
+ pytest.skip("missing values not supported")
# GH 35538. Use deep copy to avoid illusive bug on np-dev
# Azure pipeline that writes into indices_dict despite copy
ind = indices_dict[request.param].copy(deep=True)
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index 8735e2a09920d..ac2e300f9f8d6 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -436,7 +436,20 @@ def test_insert_index_float64(self, insert, coerced_val, coerced_dtype):
],
ids=["datetime64", "datetime64tz"],
)
- def test_insert_index_datetimes(self, fill_val, exp_dtype):
+ @pytest.mark.parametrize(
+ "insert_value",
+ [pd.Timestamp("2012-01-01"), pd.Timestamp("2012-01-01", tz="Asia/Tokyo"), 1],
+ )
+ def test_insert_index_datetimes(self, request, fill_val, exp_dtype, insert_value):
+ if not hasattr(insert_value, "tz"):
+ request.node.add_marker(
+ pytest.mark.xfail(reason="ToDo: must coerce to object")
+ )
+ elif fill_val.tz != insert_value.tz:
+ request.node.add_marker(
+ pytest.mark.xfail(reason="GH 37605 - require tz equality?")
+ )
+
obj = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"], tz=fill_val.tz
)
@@ -448,25 +461,7 @@ def test_insert_index_datetimes(self, fill_val, exp_dtype):
)
self._assert_insert_conversion(obj, fill_val, exp, exp_dtype)
- if fill_val.tz:
- msg = "Cannot compare tz-naive and tz-aware"
- with pytest.raises(TypeError, match=msg):
- obj.insert(1, pd.Timestamp("2012-01-01"))
-
- msg = "Timezones don't match"
- with pytest.raises(ValueError, match=msg):
- obj.insert(1, pd.Timestamp("2012-01-01", tz="Asia/Tokyo"))
-
- else:
- msg = "Cannot compare tz-naive and tz-aware"
- with pytest.raises(TypeError, match=msg):
- obj.insert(1, pd.Timestamp("2012-01-01", tz="Asia/Tokyo"))
-
- msg = "value should be a 'Timestamp' or 'NaT'. Got 'int' instead."
- with pytest.raises(TypeError, match=msg):
- obj.insert(1, 1)
-
- pytest.xfail("ToDo: must coerce to object")
+ obj.insert(1, insert_value)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(["1 day", "2 day", "3 day", "4 day"])
diff --git a/pandas/tests/tseries/offsets/common.py b/pandas/tests/tseries/offsets/common.py
index b2ac28e1865d6..5edef896be537 100644
--- a/pandas/tests/tseries/offsets/common.py
+++ b/pandas/tests/tseries/offsets/common.py
@@ -98,12 +98,10 @@ def _get_offset(self, klass, value=1, normalize=False):
klass = klass(value, normalize=normalize)
return klass
- def test_apply_out_of_range(self, tz_naive_fixture):
+ def test_apply_out_of_range(self, request, tz_naive_fixture):
tz = tz_naive_fixture
if self._offset is None:
return
- if isinstance(tz, tzlocal) and not IS64:
- pytest.xfail(reason="OverflowError inside tzlocal past 2038")
# try to create an out-of-bounds result timestamp; if we can't create
# the offset skip
@@ -123,6 +121,13 @@ def test_apply_out_of_range(self, tz_naive_fixture):
t = Timestamp("20080101", tz=tz)
result = t + offset
assert isinstance(result, datetime)
+
+ if isinstance(tz, tzlocal) and not IS64:
+ # If we hit OutOfBoundsDatetime on non-64 bit machines
+ # we'll drop out of the try clause before the next test
+ request.node.add_marker(
+ pytest.mark.xfail(reason="OverflowError inside tzlocal past 2038")
+ )
assert t.tzinfo == result.tzinfo
except OutOfBoundsDatetime:
| - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
Part of #38902. | https://api.github.com/repos/pandas-dev/pandas/pulls/38960 | 2021-01-04T23:37:53Z | 2021-01-09T21:46:29Z | 2021-01-09T21:46:29Z | 2021-02-03T02:48:45Z |
TYP/CLN: assorted cleanups | diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 1339dee954603..4dc14397a30f4 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -3565,7 +3565,7 @@ cpdef to_offset(freq):
f"to_offset does not support tuples {freq}, pass as a string instead"
)
- elif isinstance(freq, timedelta):
+ elif PyDelta_Check(freq):
return delta_to_tick(freq)
elif isinstance(freq, str):
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 2862f7c957abc..9fc1f0509d232 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -1002,14 +1002,6 @@ def tz_aware_fixture(request):
tz_aware_fixture2 = tz_aware_fixture
-@pytest.fixture(scope="module")
-def datetime_tz_utc():
- """
- Yields the UTC timezone object from the datetime module.
- """
- return timezone.utc
-
-
@pytest.fixture(params=["utc", "dateutil/UTC", utc, tzutc(), timezone.utc])
def utc_fixture(request):
"""
@@ -1189,7 +1181,7 @@ def any_nullable_int_dtype(request):
@pytest.fixture(params=tm.ALL_EA_INT_DTYPES + tm.FLOAT_EA_DTYPES)
-def any_numeric_dtype(request):
+def any_nullable_numeric_dtype(request):
"""
Parameterized fixture for any nullable integer dtype and
any float ea dtypes.
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 7f4e16dc236ac..94c7d325d0bc8 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -658,7 +658,7 @@ def _astype(self, dtype: DtypeObj, copy: bool) -> ArrayLike:
values = values.astype(dtype, copy=copy)
else:
- values = astype_nansafe(values, dtype, copy=True)
+ values = astype_nansafe(values, dtype, copy=copy)
return values
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 013e52248f5c4..f97077954f8bf 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -413,7 +413,7 @@ def _get_empty_dtype_and_na(join_units: Sequence[JoinUnit]) -> Tuple[DtypeObj, A
return np.dtype("M8[ns]"), np.datetime64("NaT", "ns")
elif "timedelta" in upcast_classes:
return np.dtype("m8[ns]"), np.timedelta64("NaT", "ns")
- else: # pragma
+ else:
try:
common_dtype = np.find_common_type(upcast_classes, [])
except TypeError:
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 7f2039c998f53..3f22f14766a07 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4494,7 +4494,7 @@ def replace(
method=method,
)
- def _replace_single(self, to_replace, method, inplace, limit):
+ def _replace_single(self, to_replace, method: str, inplace: bool, limit):
"""
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 341a8a9f90b96..ceaf6e1ac21e5 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -80,6 +80,8 @@
if TYPE_CHECKING:
from tables import Col, File, Node
+ from pandas.core.internals import Block
+
# versioning attribute
_version = "0.15.2"
@@ -3860,9 +3862,6 @@ def _create_axes(
for a in new_non_index_axes:
obj = _reindex_axis(obj, a[0], a[1])
- def get_blk_items(mgr, blocks):
- return [mgr.items.take(blk.mgr_locs) for blk in blocks]
-
transposed = new_index.axis == 1
# figure out data_columns and get out blocks
@@ -3870,10 +3869,10 @@ def get_blk_items(mgr, blocks):
data_columns, min_itemsize, new_non_index_axes
)
- block_obj = self.get_object(obj, transposed)._consolidate()
+ frame = self.get_object(obj, transposed)._consolidate()
blocks, blk_items = self._get_blocks_and_items(
- block_obj, table_exists, new_non_index_axes, self.values_axes, data_columns
+ frame, table_exists, new_non_index_axes, self.values_axes, data_columns
)
# add my values
@@ -3978,27 +3977,31 @@ def get_blk_items(mgr, blocks):
@staticmethod
def _get_blocks_and_items(
- block_obj, table_exists, new_non_index_axes, values_axes, data_columns
+ frame: DataFrame,
+ table_exists: bool,
+ new_non_index_axes,
+ values_axes,
+ data_columns,
):
# Helper to clarify non-state-altering parts of _create_axes
- def get_blk_items(mgr, blocks):
- return [mgr.items.take(blk.mgr_locs) for blk in blocks]
+ def get_blk_items(mgr):
+ return [mgr.items.take(blk.mgr_locs) for blk in mgr.blocks]
- blocks = block_obj._mgr.blocks
- blk_items = get_blk_items(block_obj._mgr, blocks)
+ blocks: List["Block"] = list(frame._mgr.blocks)
+ blk_items: List[Index] = get_blk_items(frame._mgr)
if len(data_columns):
axis, axis_labels = new_non_index_axes[0]
new_labels = Index(axis_labels).difference(Index(data_columns))
- mgr = block_obj.reindex(new_labels, axis=axis)._mgr
+ mgr = frame.reindex(new_labels, axis=axis)._mgr
blocks = list(mgr.blocks)
- blk_items = get_blk_items(mgr, blocks)
+ blk_items = get_blk_items(mgr)
for c in data_columns:
- mgr = block_obj.reindex([c], axis=axis)._mgr
+ mgr = frame.reindex([c], axis=axis)._mgr
blocks.extend(mgr.blocks)
- blk_items.extend(get_blk_items(mgr, mgr.blocks))
+ blk_items.extend(get_blk_items(mgr))
# reorder the blocks in the same order as the existing table if we can
if table_exists:
@@ -4006,7 +4009,7 @@ def get_blk_items(mgr, blocks):
tuple(b_items.tolist()): (b, b_items)
for b, b_items in zip(blocks, blk_items)
}
- new_blocks = []
+ new_blocks: List["Block"] = []
new_blk_items = []
for ea in values_axes:
items = tuple(ea.values)
@@ -4875,7 +4878,7 @@ def _unconvert_index(
def _maybe_convert_for_string_atom(
- name: str, block, existing_col, min_itemsize, nan_rep, encoding, errors
+ name: str, block: "Block", existing_col, min_itemsize, nan_rep, encoding, errors
):
if not block.is_object:
return block.values
@@ -4895,11 +4898,12 @@ def _maybe_convert_for_string_atom(
elif not (inferred_type == "string" or dtype_name == "object"):
return block.values
- block = block.fillna(nan_rep, downcast=False)
- if isinstance(block, list):
- # Note: because block is always object dtype, fillna goes
- # through a path such that the result is always a 1-element list
- block = block[0]
+ blocks: List["Block"] = block.fillna(nan_rep, downcast=False)
+ # Note: because block is always object dtype, fillna goes
+ # through a path such that the result is always a 1-element list
+ assert len(blocks) == 1
+ block = blocks[0]
+
data = block.values
# see if we have a valid string type
diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py
index add1bd4bb3972..e448cf0b578ae 100644
--- a/pandas/tests/indexes/datetimes/test_timezones.py
+++ b/pandas/tests/indexes/datetimes/test_timezones.py
@@ -1159,7 +1159,6 @@ def test_dti_union_mixed(self):
@pytest.mark.parametrize(
"tz", [None, "UTC", "US/Central", dateutil.tz.tzoffset(None, -28800)]
)
- @pytest.mark.usefixtures("datetime_tz_utc")
def test_iteration_preserves_nanoseconds(self, tz):
# GH 19603
index = DatetimeIndex(
diff --git a/pandas/tests/indexes/timedeltas/test_partial_slicing.py b/pandas/tests/indexes/timedeltas/test_partial_slicing.py
index e5f509acf4734..6d53fe4563e41 100644
--- a/pandas/tests/indexes/timedeltas/test_partial_slicing.py
+++ b/pandas/tests/indexes/timedeltas/test_partial_slicing.py
@@ -1,5 +1,4 @@
import numpy as np
-import pytest
from pandas import Series, timedelta_range
import pandas._testing as tm
@@ -22,13 +21,6 @@ def test_partial_slice(self):
expected = s.iloc[:134]
tm.assert_series_equal(result, expected)
- result = s["6 days, 23:11:12"]
- assert result == s.iloc[133]
-
- msg = r"^Timedelta\('50 days 00:00:00'\)$"
- with pytest.raises(KeyError, match=msg):
- s["50 days"]
-
def test_partial_slice_high_reso(self):
# higher reso
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index 2022bca514540..a7a60f37bcd00 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -18,6 +18,7 @@
Timestamp,
date_range,
period_range,
+ timedelta_range,
)
import pandas._testing as tm
from pandas.core.indexing import IndexingError
@@ -121,6 +122,23 @@ def test_getitem_scalar_categorical_index(self):
result = ser[cats[0]]
assert result == expected
+ def test_getitem_str_with_timedeltaindex(self):
+ rng = timedelta_range("1 day 10:11:12", freq="h", periods=500)
+ ser = Series(np.arange(len(rng)), index=rng)
+
+ key = "6 days, 23:11:12"
+ indexer = rng.get_loc(key)
+ assert indexer == 133
+
+ result = ser[key]
+ assert result == ser.iloc[133]
+
+ msg = r"^Timedelta\('50 days 00:00:00'\)$"
+ with pytest.raises(KeyError, match=msg):
+ rng.get_loc("50 days")
+ with pytest.raises(KeyError, match=msg):
+ ser["50 days"]
+
class TestSeriesGetitemSlices:
def test_getitem_partial_str_slice_with_datetimeindex(self):
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index d6d0723bee0e8..47641d49c7a09 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -167,19 +167,19 @@ def test_setitem_boolean_td64_values_cast_na(self, value):
expected = Series([NaT, 1, 2], dtype="timedelta64[ns]")
tm.assert_series_equal(series, expected)
- def test_setitem_boolean_nullable_int_types(self, any_numeric_dtype):
+ def test_setitem_boolean_nullable_int_types(self, any_nullable_numeric_dtype):
# GH: 26468
- ser = Series([5, 6, 7, 8], dtype=any_numeric_dtype)
- ser[ser > 6] = Series(range(4), dtype=any_numeric_dtype)
- expected = Series([5, 6, 2, 3], dtype=any_numeric_dtype)
+ ser = Series([5, 6, 7, 8], dtype=any_nullable_numeric_dtype)
+ ser[ser > 6] = Series(range(4), dtype=any_nullable_numeric_dtype)
+ expected = Series([5, 6, 2, 3], dtype=any_nullable_numeric_dtype)
tm.assert_series_equal(ser, expected)
- ser = Series([5, 6, 7, 8], dtype=any_numeric_dtype)
- ser.loc[ser > 6] = Series(range(4), dtype=any_numeric_dtype)
+ ser = Series([5, 6, 7, 8], dtype=any_nullable_numeric_dtype)
+ ser.loc[ser > 6] = Series(range(4), dtype=any_nullable_numeric_dtype)
tm.assert_series_equal(ser, expected)
- ser = Series([5, 6, 7, 8], dtype=any_numeric_dtype)
- loc_ser = Series(range(4), dtype=any_numeric_dtype)
+ ser = Series([5, 6, 7, 8], dtype=any_nullable_numeric_dtype)
+ loc_ser = Series(range(4), dtype=any_nullable_numeric_dtype)
ser.loc[ser > 6] = loc_ser.loc[loc_ser > 1]
tm.assert_series_equal(ser, expected)
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index c7bd38bbd00b9..d7cd92c8e3362 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -272,8 +272,8 @@ def test_constructor_index_dtype(self, dtype):
[
([1, 2]),
(["1", "2"]),
- (list(pd.date_range("1/1/2011", periods=2, freq="H"))),
- (list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
+ (list(date_range("1/1/2011", periods=2, freq="H"))),
+ (list(date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([Interval(left=0, right=5)]),
],
)
@@ -628,10 +628,10 @@ def test_constructor_copy(self):
@pytest.mark.parametrize(
"index",
[
- pd.date_range("20170101", periods=3, tz="US/Eastern"),
- pd.date_range("20170101", periods=3),
- pd.timedelta_range("1 day", periods=3),
- pd.period_range("2012Q1", periods=3, freq="Q"),
+ date_range("20170101", periods=3, tz="US/Eastern"),
+ date_range("20170101", periods=3),
+ timedelta_range("1 day", periods=3),
+ period_range("2012Q1", periods=3, freq="Q"),
Index(list("abc")),
pd.Int64Index([1, 2, 3]),
RangeIndex(0, 3),
@@ -1038,16 +1038,16 @@ def test_construction_consistency(self):
# make sure that we are not re-localizing upon construction
# GH 14928
- s = Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
+ ser = Series(date_range("20130101", periods=3, tz="US/Eastern"))
- result = Series(s, dtype=s.dtype)
- tm.assert_series_equal(result, s)
+ result = Series(ser, dtype=ser.dtype)
+ tm.assert_series_equal(result, ser)
- result = Series(s.dt.tz_convert("UTC"), dtype=s.dtype)
- tm.assert_series_equal(result, s)
+ result = Series(ser.dt.tz_convert("UTC"), dtype=ser.dtype)
+ tm.assert_series_equal(result, ser)
- result = Series(s.values, dtype=s.dtype)
- tm.assert_series_equal(result, s)
+ result = Series(ser.values, dtype=ser.dtype)
+ tm.assert_series_equal(result, ser)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
@@ -1374,7 +1374,7 @@ def test_convert_non_ns(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1, 2, 3], dtype="timedelta64[s]")
s = Series(arr)
- expected = Series(pd.timedelta_range("00:00:01", periods=3, freq="s"))
+ expected = Series(timedelta_range("00:00:01", periods=3, freq="s"))
tm.assert_series_equal(s, expected)
# convert from a numpy array of non-ns datetime64
| https://api.github.com/repos/pandas-dev/pandas/pulls/38959 | 2021-01-04T22:45:54Z | 2021-01-05T00:14:05Z | 2021-01-05T00:14:05Z | 2021-01-05T01:18:03Z | |
Backport PR #38957 on branch 1.2.x (DOC: move API breaking "check_freq" section from v1.2.1rst to v1.1.0.rst) | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index e054ac830ce41..64552b104c053 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -716,6 +716,19 @@ apply and applymap on ``DataFrame`` evaluates first row/column only once
df.apply(func, axis=1)
+.. _whatsnew_110.api_breaking:
+
+Backwards incompatible API changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. _whatsnew_110.api_breaking.testing.check_freq:
+
+Added ``check_freq`` argument to ``testing.assert_frame_equal`` and ``testing.assert_series_equal``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``check_freq`` argument was added to :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` in pandas 1.1.0 and defaults to ``True``. :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` now raise ``AssertionError`` if the indexes do not have the same frequency. Before pandas 1.1.0, the index frequency was not checked.
+
+
Increased minimum versions for dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index 37298d12a12c4..e9602bbe1cee1 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -10,20 +10,6 @@ including other versions of pandas.
.. ---------------------------------------------------------------------------
-.. _whatsnew_121.api_breaking:
-
-Backwards incompatible API changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. _whatsnew_121.api_breaking.testing.check_freq:
-
-Added ``check_freq`` argument to ``testing.assert_frame_equal`` and ``testing.assert_series_equal``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The ``check_freq`` argument was added to :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` in pandas 1.1.0 and defaults to ``True``. :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` now raise ``AssertionError`` if the indexes do not have the same frequency. Before pandas 1.1.0, the index frequency was not checked.
-
-.. ---------------------------------------------------------------------------
-
.. _whatsnew_121.regressions:
Fixed regressions
@@ -62,7 +48,7 @@ I/O
Other
~~~~~
- Fixed build failure on MacOS 11 in Python 3.9.1 (:issue:`38766`)
--
+- Added reference to backwards incompatible ``check_freq`` arg of :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` in :ref:`pandas 1.1.0 whats new <whatsnew_110.api_breaking.testing.check_freq>` (:issue:`34050`)
.. ---------------------------------------------------------------------------
| Backport PR #38957: DOC: move API breaking "check_freq" section from v1.2.1rst to v1.1.0.rst | https://api.github.com/repos/pandas-dev/pandas/pulls/38958 | 2021-01-04T22:45:26Z | 2021-01-05T00:11:03Z | 2021-01-05T00:11:03Z | 2021-01-05T00:11:03Z |
DOC: move API breaking "check_freq" section from v1.2.1rst to v1.1.0.rst | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index e054ac830ce41..64552b104c053 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -716,6 +716,19 @@ apply and applymap on ``DataFrame`` evaluates first row/column only once
df.apply(func, axis=1)
+.. _whatsnew_110.api_breaking:
+
+Backwards incompatible API changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. _whatsnew_110.api_breaking.testing.check_freq:
+
+Added ``check_freq`` argument to ``testing.assert_frame_equal`` and ``testing.assert_series_equal``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``check_freq`` argument was added to :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` in pandas 1.1.0 and defaults to ``True``. :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` now raise ``AssertionError`` if the indexes do not have the same frequency. Before pandas 1.1.0, the index frequency was not checked.
+
+
Increased minimum versions for dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index 37298d12a12c4..e9602bbe1cee1 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -10,20 +10,6 @@ including other versions of pandas.
.. ---------------------------------------------------------------------------
-.. _whatsnew_121.api_breaking:
-
-Backwards incompatible API changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. _whatsnew_121.api_breaking.testing.check_freq:
-
-Added ``check_freq`` argument to ``testing.assert_frame_equal`` and ``testing.assert_series_equal``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The ``check_freq`` argument was added to :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` in pandas 1.1.0 and defaults to ``True``. :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` now raise ``AssertionError`` if the indexes do not have the same frequency. Before pandas 1.1.0, the index frequency was not checked.
-
-.. ---------------------------------------------------------------------------
-
.. _whatsnew_121.regressions:
Fixed regressions
@@ -62,7 +48,7 @@ I/O
Other
~~~~~
- Fixed build failure on MacOS 11 in Python 3.9.1 (:issue:`38766`)
--
+- Added reference to backwards incompatible ``check_freq`` arg of :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` in :ref:`pandas 1.1.0 whats new <whatsnew_110.api_breaking.testing.check_freq>` (:issue:`34050`)
.. ---------------------------------------------------------------------------
| and add reference to 1.1.0 whats new update in v1.2.1.rst
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
This moves the doc notes for `assert_series_equal` and `assert_frame_equal` from the 1.2.1 whats new to the 1.1.0 whats new and adds a small reference to the section in the 1.1.0 page in the 1.2.1 page. This came up in #38471 and makes more sense to me than the current approach, but happy to close if current state is preferred.
| https://api.github.com/repos/pandas-dev/pandas/pulls/38957 | 2021-01-04T20:42:11Z | 2021-01-04T22:45:12Z | 2021-01-04T22:45:11Z | 2021-01-05T15:37:43Z |
CLN: Unify number recognition tests for all parsers | diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 3058d1eed22b9..6e9cc18358153 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -2349,12 +2349,12 @@ def __init__(self, f: Union[FilePathOrBuffer, List], **kwds):
decimal = re.escape(self.decimal)
if self.thousands is None:
- regex = fr"^\-?[0-9]*({decimal}[0-9]*)?([0-9](E|e)\-?[0-9]*)?$"
+ regex = fr"^[\-\+]?[0-9]*({decimal}[0-9]*)?([0-9]?(E|e)\-?[0-9]+)?$"
else:
thousands = re.escape(self.thousands)
regex = (
- fr"^\-?([0-9]+{thousands}|[0-9])*({decimal}[0-9]*)?"
- fr"([0-9](E|e)\-?[0-9]*)?$"
+ fr"^[\-\+]?([0-9]+{thousands}|[0-9])*({decimal}[0-9]*)?"
+ fr"([0-9]?(E|e)\-?[0-9]+)?$"
)
self.num = re.compile(regex)
diff --git a/pandas/tests/io/parser/conftest.py b/pandas/tests/io/parser/conftest.py
index ec098353960d7..321678c36943a 100644
--- a/pandas/tests/io/parser/conftest.py
+++ b/pandas/tests/io/parser/conftest.py
@@ -148,3 +148,58 @@ def encoding_fmt(request):
Fixture for all possible string formats of a UTF encoding.
"""
return request.param
+
+
+@pytest.fixture(
+ params=[
+ ("-1,0", -1.0),
+ ("-1,2e0", -1.2),
+ ("-1e0", -1.0),
+ ("+1e0", 1.0),
+ ("+1e+0", 1.0),
+ ("+1e-1", 0.1),
+ ("+,1e1", 1.0),
+ ("+1,e0", 1.0),
+ ("-,1e1", -1.0),
+ ("-1,e0", -1.0),
+ ("0,1", 0.1),
+ ("1,", 1.0),
+ (",1", 0.1),
+ ("-,1", -0.1),
+ ("1_,", 1.0),
+ ("1_234,56", 1234.56),
+ ("1_234,56e0", 1234.56),
+ # negative cases; must not parse as float
+ ("_", "_"),
+ ("-_", "-_"),
+ ("-_1", "-_1"),
+ ("-_1e0", "-_1e0"),
+ ("_1", "_1"),
+ ("_1,", "_1,"),
+ ("_1,_", "_1,_"),
+ ("_1e0", "_1e0"),
+ ("1,2e_1", "1,2e_1"),
+ ("1,2e1_0", "1,2e1_0"),
+ ("1,_2", "1,_2"),
+ (",1__2", ",1__2"),
+ (",1e", ",1e"),
+ ("-,1e", "-,1e"),
+ ("1_000,000_000", "1_000,000_000"),
+ ("1,e1_2", "1,e1_2"),
+ ("e11,2", "e11,2"),
+ ("1e11,2", "1e11,2"),
+ ("1,2,2", "1,2,2"),
+ ("1,2_1", "1,2_1"),
+ ("1,2e-10e1", "1,2e-10e1"),
+ ("--1,2", "--1,2"),
+ ("1a_2,1", "1a_2,1"),
+ ("1,2E-1", 0.12),
+ ("1,2E1", 12.0),
+ ]
+)
+def numeric_decimal(request):
+ """
+ Fixture for all numeric formats which should get recognized. The first entry
+ represents the value to read while the second represents the expected result.
+ """
+ return request.param
diff --git a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
index fc34d65fdad52..ec1ccf009b8de 100644
--- a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
+++ b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
@@ -181,3 +181,35 @@ def test_delimiter_with_usecols_and_parse_dates(all_parsers):
{"col1": [-9.1], "col2": [-9.1], "col3": [Timestamp("2010-10-10")]}
)
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("thousands", ["_", None])
+def test_decimal_and_exponential(python_parser_only, numeric_decimal, thousands):
+ # GH#31920
+ decimal_number_check(python_parser_only, numeric_decimal, thousands, None)
+
+
+@pytest.mark.parametrize("thousands", ["_", None])
+@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
+def test_1000_sep_decimal_float_precision(
+ c_parser_only, numeric_decimal, float_precision, thousands
+):
+ # test decimal and thousand sep handling in across 'float_precision'
+ # parsers
+ decimal_number_check(c_parser_only, numeric_decimal, thousands, float_precision)
+
+
+def decimal_number_check(parser, numeric_decimal, thousands, float_precision):
+ # GH#31920
+ value = numeric_decimal[0]
+ if thousands is None and "_" in value:
+ pytest.skip("Skip test if no thousands sep is defined and sep is in value")
+ df = parser.read_csv(
+ StringIO(value),
+ sep="|",
+ thousands=thousands,
+ decimal=",",
+ header=None,
+ )
+ val = df.iloc[0, 0]
+ assert val == numeric_decimal[1]
diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py
index 15e7569ea9014..da778093237b0 100644
--- a/pandas/tests/io/parser/test_c_parser_only.py
+++ b/pandas/tests/io/parser/test_c_parser_only.py
@@ -653,64 +653,6 @@ def test_1000_sep_with_decimal(
tm.assert_frame_equal(result, expected)
-@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
-@pytest.mark.parametrize(
- "value,expected",
- [
- ("-1,0", -1.0),
- ("-1,2e0", -1.2),
- ("-1e0", -1.0),
- ("+1e0", 1.0),
- ("+1e+0", 1.0),
- ("+1e-1", 0.1),
- ("+,1e1", 1.0),
- ("+1,e0", 1.0),
- ("-,1e1", -1.0),
- ("-1,e0", -1.0),
- ("0,1", 0.1),
- ("1,", 1.0),
- (",1", 0.1),
- ("-,1", -0.1),
- ("1_,", 1.0),
- ("1_234,56", 1234.56),
- ("1_234,56e0", 1234.56),
- # negative cases; must not parse as float
- ("_", "_"),
- ("-_", "-_"),
- ("-_1", "-_1"),
- ("-_1e0", "-_1e0"),
- ("_1", "_1"),
- ("_1,", "_1,"),
- ("_1,_", "_1,_"),
- ("_1e0", "_1e0"),
- ("1,2e_1", "1,2e_1"),
- ("1,2e1_0", "1,2e1_0"),
- ("1,_2", "1,_2"),
- (",1__2", ",1__2"),
- (",1e", ",1e"),
- ("-,1e", "-,1e"),
- ("1_000,000_000", "1_000,000_000"),
- ("1,e1_2", "1,e1_2"),
- ],
-)
-def test_1000_sep_decimal_float_precision(
- c_parser_only, value, expected, float_precision
-):
- # test decimal and thousand sep handling in across 'float_precision'
- # parsers
- parser = c_parser_only
- df = parser.read_csv(
- StringIO(value),
- sep="|",
- thousands="_",
- decimal=",",
- header=None,
- float_precision=float_precision,
- )
- val = df.iloc[0, 0]
- assert val == expected
-
-
def test_float_precision_options(c_parser_only):
# GH 17154, 36228
parser = c_parser_only
diff --git a/pandas/tests/io/parser/test_python_parser_only.py b/pandas/tests/io/parser/test_python_parser_only.py
index 04d5413abfafc..d55a6361fc8d2 100644
--- a/pandas/tests/io/parser/test_python_parser_only.py
+++ b/pandas/tests/io/parser/test_python_parser_only.py
@@ -305,49 +305,3 @@ def test_malformed_skipfooter(python_parser_only):
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#", skipfooter=1)
-
-
-@pytest.mark.parametrize("thousands", [None, "."])
-@pytest.mark.parametrize(
- "value, result_value",
- [
- ("1,2", 1.2),
- ("1,2e-1", 0.12),
- ("1,2E-1", 0.12),
- ("1,2e-10", 0.0000000012),
- ("1,2e1", 12.0),
- ("1,2E1", 12.0),
- ("-1,2e-1", -0.12),
- ("0,2", 0.2),
- (",2", 0.2),
- ],
-)
-def test_decimal_and_exponential(python_parser_only, thousands, value, result_value):
- # GH#31920
- data = StringIO(
- f"""a b
- 1,1 {value}
- """
- )
- result = python_parser_only.read_csv(
- data, "\t", decimal=",", engine="python", thousands=thousands
- )
- expected = DataFrame({"a": [1.1], "b": [result_value]})
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("thousands", [None, "."])
-@pytest.mark.parametrize(
- "value",
- ["e11,2", "1e11,2", "1,2,2", "1,2.1", "1,2e-10e1", "--1,2", "1a.2,1", "1..2,3"],
-)
-def test_decimal_and_exponential_erroneous(python_parser_only, thousands, value):
- # GH#31920
- data = StringIO(
- f"""a b
- 1,1 {value}
- """
- )
- result = python_parser_only.read_csv(data, "\t", decimal=",", thousands=thousands)
- expected = DataFrame({"a": [1.1], "b": [value]})
- tm.assert_frame_equal(result, expected)
| - [x] closes #38926
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
Minor regex improvements.
Is a fixture the right thing to do here?
| https://api.github.com/repos/pandas-dev/pandas/pulls/38954 | 2021-01-04T19:03:38Z | 2021-01-04T23:15:17Z | 2021-01-04T23:15:17Z | 2021-01-04T23:23:01Z |
DOC: clarify and spellcheck indexing documentation | diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst
index 0a11344d575f1..dc66303a44f53 100644
--- a/doc/source/user_guide/indexing.rst
+++ b/doc/source/user_guide/indexing.rst
@@ -429,7 +429,7 @@ For the rationale behind this behavior, see
s = pd.Series(list('abcdef'), index=[0, 3, 2, 5, 4, 2])
s.loc[3:5]
-Also, if the index has duplicate labels *and* either the start or the stop label is dupulicated,
+Also, if the index has duplicate labels *and* either the start or the stop label is duplicated,
an error will be raised. For instance, in the above example, ``s.loc[2:5]`` would raise a ``KeyError``.
For more information about duplicate labels, see
@@ -1138,10 +1138,10 @@ Setting with enlargement conditionally using :func:`numpy`
----------------------------------------------------------
An alternative to :meth:`~pandas.DataFrame.where` is to use :func:`numpy.where`.
-Combined with setting a new column, you can use it to enlarge a dataframe where the
+Combined with setting a new column, you can use it to enlarge a DataFrame where the
values are determined conditionally.
-Consider you have two choices to choose from in the following dataframe. And you want to
+Consider you have two choices to choose from in the following DataFrame. And you want to
set a new column color to 'green' when the second column has 'Z'. You can do the
following:
@@ -1293,8 +1293,8 @@ Full numpy-like syntax:
df.query('(a < b) & (b < c)')
df[(df['a'] < df['b']) & (df['b'] < df['c'])]
-Slightly nicer by removing the parentheses (by binding making comparison
-operators bind tighter than ``&`` and ``|``).
+Slightly nicer by removing the parentheses (comparison operators bind tighter
+than ``&`` and ``|``):
.. ipython:: python
| https://api.github.com/repos/pandas-dev/pandas/pulls/38951 | 2021-01-04T18:42:54Z | 2021-01-04T22:34:53Z | 2021-01-04T22:34:53Z | 2021-01-05T17:09:34Z | |
REF: de-duplicate tslibs.fields | diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index 16fa05c3801c6..57404b99c7628 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -174,6 +174,18 @@ def get_date_name_field(const int64_t[:] dtindex, str field, object locale=None)
return out
+cdef inline bint _is_on_month(int month, int compare_month, int modby) nogil:
+ """
+ Analogous to DateOffset.is_on_offset checking for the month part of a date.
+ """
+ if modby == 1:
+ return True
+ elif modby == 3:
+ return (month - compare_month) % 3 == 0
+ else:
+ return month == compare_month
+
+
@cython.wraparound(False)
@cython.boundscheck(False)
def get_start_end_field(const int64_t[:] dtindex, str field,
@@ -191,6 +203,7 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
int start_month = 1
ndarray[int8_t] out
npy_datetimestruct dts
+ int compare_month, modby
out = np.zeros(count, dtype='int8')
@@ -215,102 +228,15 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
end_month = 12
start_month = 1
- if field == 'is_month_start':
- if is_business:
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = 0
- continue
-
- dt64_to_dtstruct(dtindex[i], &dts)
-
- if dts.day == get_firstbday(dts.year, dts.month):
- out[i] = 1
-
- else:
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = 0
- continue
-
- dt64_to_dtstruct(dtindex[i], &dts)
-
- if dts.day == 1:
- out[i] = 1
-
- elif field == 'is_month_end':
- if is_business:
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = 0
- continue
-
- dt64_to_dtstruct(dtindex[i], &dts)
-
- if dts.day == get_lastbday(dts.year, dts.month):
- out[i] = 1
-
- else:
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = 0
- continue
-
- dt64_to_dtstruct(dtindex[i], &dts)
-
- if dts.day == get_days_in_month(dts.year, dts.month):
- out[i] = 1
-
- elif field == 'is_quarter_start':
- if is_business:
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = 0
- continue
-
- dt64_to_dtstruct(dtindex[i], &dts)
-
- if ((dts.month - start_month) % 3 == 0) and (
- dts.day == get_firstbday(dts.year, dts.month)):
- out[i] = 1
-
- else:
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = 0
- continue
-
- dt64_to_dtstruct(dtindex[i], &dts)
-
- if ((dts.month - start_month) % 3 == 0) and dts.day == 1:
- out[i] = 1
-
- elif field == 'is_quarter_end':
- if is_business:
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = 0
- continue
-
- dt64_to_dtstruct(dtindex[i], &dts)
-
- if ((dts.month - end_month) % 3 == 0) and (
- dts.day == get_lastbday(dts.year, dts.month)):
- out[i] = 1
-
- else:
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = 0
- continue
-
- dt64_to_dtstruct(dtindex[i], &dts)
-
- if ((dts.month - end_month) % 3 == 0) and (
- dts.day == get_days_in_month(dts.year, dts.month)):
- out[i] = 1
+ compare_month = start_month if "start" in field else end_month
+ if "month" in field:
+ modby = 1
+ elif "quarter" in field:
+ modby = 3
+ else:
+ modby = 12
- elif field == 'is_year_start':
+ if field in ["is_month_start", "is_quarter_start", "is_year_start"]:
if is_business:
for i in range(count):
if dtindex[i] == NPY_NAT:
@@ -319,7 +245,7 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
dt64_to_dtstruct(dtindex[i], &dts)
- if (dts.month == start_month) and (
+ if _is_on_month(dts.month, compare_month, modby) and (
dts.day == get_firstbday(dts.year, dts.month)):
out[i] = 1
@@ -331,10 +257,10 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
dt64_to_dtstruct(dtindex[i], &dts)
- if (dts.month == start_month) and dts.day == 1:
+ if _is_on_month(dts.month, compare_month, modby) and dts.day == 1:
out[i] = 1
- elif field == 'is_year_end':
+ elif field in ["is_month_end", "is_quarter_end", "is_year_end"]:
if is_business:
for i in range(count):
if dtindex[i] == NPY_NAT:
@@ -343,7 +269,7 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
dt64_to_dtstruct(dtindex[i], &dts)
- if (dts.month == end_month) and (
+ if _is_on_month(dts.month, compare_month, modby) and (
dts.day == get_lastbday(dts.year, dts.month)):
out[i] = 1
@@ -355,7 +281,7 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
dt64_to_dtstruct(dtindex[i], &dts)
- if (dts.month == end_month) and (
+ if _is_on_month(dts.month, compare_month, modby) and (
dts.day == get_days_in_month(dts.year, dts.month)):
out[i] = 1
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38950 | 2021-01-04T18:19:52Z | 2021-01-04T19:20:16Z | 2021-01-04T19:20:16Z | 2021-01-04T19:30:14Z |
Backport PR #38909 on branch 1.2.x (BUG: Fixed regression in rolling.skew and rolling.kurt modifying object) | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index 4102bdd07aa8f..37298d12a12c4 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -37,7 +37,7 @@ Fixed regressions
- Fixed regression in :meth:`.GroupBy.sem` where the presence of non-numeric columns would cause an error instead of being dropped (:issue:`38774`)
- Fixed regression in :func:`read_excel` with non-rawbyte file handles (:issue:`38788`)
- Bug in :meth:`read_csv` with ``float_precision="high"`` caused segfault or wrong parsing of long exponent strings. This resulted in a regression in some cases as the default for ``float_precision`` was changed in pandas 1.2.0 (:issue:`38753`)
--
+- Fixed regression in :meth:`Rolling.skew` and :meth:`Rolling.kurt` modifying the object inplace (:issue:`38908`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index 54a09a6d2ede7..882674a5c5c92 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -523,7 +523,7 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
float64_t x = 0, xx = 0, xxx = 0
int64_t nobs = 0, i, j, N = len(values), nobs_mean = 0
int64_t s, e
- ndarray[float64_t] output, mean_array
+ ndarray[float64_t] output, mean_array, values_copy
bint is_monotonic_increasing_bounds
minp = max(minp, 3)
@@ -532,10 +532,11 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
)
output = np.empty(N, dtype=float)
min_val = np.nanmin(values)
+ values_copy = np.copy(values)
with nogil:
for i in range(0, N):
- val = values[i]
+ val = values_copy[i]
if notnan(val):
nobs_mean += 1
sum_val += val
@@ -544,7 +545,7 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
if min_val - mean_val > -1e5:
mean_val = round(mean_val)
for i in range(0, N):
- values[i] = values[i] - mean_val
+ values_copy[i] = values_copy[i] - mean_val
for i in range(0, N):
@@ -556,7 +557,7 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
if i == 0 or not is_monotonic_increasing_bounds:
for j in range(s, e):
- val = values[j]
+ val = values_copy[j]
add_skew(val, &nobs, &x, &xx, &xxx, &compensation_x_add,
&compensation_xx_add, &compensation_xxx_add)
@@ -566,13 +567,13 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
# and removed
# calculate deletes
for j in range(start[i - 1], s):
- val = values[j]
+ val = values_copy[j]
remove_skew(val, &nobs, &x, &xx, &xxx, &compensation_x_remove,
&compensation_xx_remove, &compensation_xxx_remove)
# calculate adds
for j in range(end[i - 1], e):
- val = values[j]
+ val = values_copy[j]
add_skew(val, &nobs, &x, &xx, &xxx, &compensation_x_add,
&compensation_xx_add, &compensation_xxx_add)
@@ -703,7 +704,7 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
float64_t compensation_x_remove = 0, compensation_x_add = 0
float64_t x = 0, xx = 0, xxx = 0, xxxx = 0
int64_t nobs = 0, i, j, s, e, N = len(values), nobs_mean = 0
- ndarray[float64_t] output
+ ndarray[float64_t] output, values_copy
bint is_monotonic_increasing_bounds
minp = max(minp, 4)
@@ -711,11 +712,12 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
start, end
)
output = np.empty(N, dtype=float)
+ values_copy = np.copy(values)
min_val = np.nanmin(values)
with nogil:
for i in range(0, N):
- val = values[i]
+ val = values_copy[i]
if notnan(val):
nobs_mean += 1
sum_val += val
@@ -724,7 +726,7 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
if min_val - mean_val > -1e4:
mean_val = round(mean_val)
for i in range(0, N):
- values[i] = values[i] - mean_val
+ values_copy[i] = values_copy[i] - mean_val
for i in range(0, N):
@@ -736,7 +738,7 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
if i == 0 or not is_monotonic_increasing_bounds:
for j in range(s, e):
- add_kurt(values[j], &nobs, &x, &xx, &xxx, &xxxx,
+ add_kurt(values_copy[j], &nobs, &x, &xx, &xxx, &xxxx,
&compensation_x_add, &compensation_xx_add,
&compensation_xxx_add, &compensation_xxxx_add)
@@ -746,13 +748,13 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
# and removed
# calculate deletes
for j in range(start[i - 1], s):
- remove_kurt(values[j], &nobs, &x, &xx, &xxx, &xxxx,
+ remove_kurt(values_copy[j], &nobs, &x, &xx, &xxx, &xxxx,
&compensation_x_remove, &compensation_xx_remove,
&compensation_xxx_remove, &compensation_xxxx_remove)
# calculate adds
for j in range(end[i - 1], e):
- add_kurt(values[j], &nobs, &x, &xx, &xxx, &xxxx,
+ add_kurt(values_copy[j], &nobs, &x, &xx, &xxx, &xxxx,
&compensation_x_add, &compensation_xx_add,
&compensation_xxx_add, &compensation_xxxx_add)
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 10b23cadfe279..e2cdf76d038ec 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -1102,11 +1102,13 @@ def test_groupby_rolling_nan_included():
@pytest.mark.parametrize("method", ["skew", "kurt"])
def test_rolling_skew_kurt_numerical_stability(method):
- # GH: 6929
- s = Series(np.random.rand(10))
- expected = getattr(s.rolling(3), method)()
- s = s + 50000
- result = getattr(s.rolling(3), method)()
+ # GH#6929
+ ser = Series(np.random.rand(10))
+ ser_copy = ser.copy()
+ expected = getattr(ser.rolling(3), method)()
+ tm.assert_series_equal(ser, ser_copy)
+ ser = ser + 50000
+ result = getattr(ser.rolling(3), method)()
tm.assert_series_equal(result, expected)
| Backport PR #38909: BUG: Fixed regression in rolling.skew and rolling.kurt modifying object | https://api.github.com/repos/pandas-dev/pandas/pulls/38945 | 2021-01-04T13:39:05Z | 2021-01-04T15:23:01Z | 2021-01-04T15:23:01Z | 2021-01-04T15:23:01Z |
Backport PR #38893: doc fix for testing.assert_series_equal check_freq arg | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index b1f8389420cd9..4102bdd07aa8f 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -15,12 +15,12 @@ including other versions of pandas.
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. _whatsnew_121.api_breaking.testing.assert_frame_equal:
+.. _whatsnew_121.api_breaking.testing.check_freq:
-Added ``check_freq`` argument to ``testing.assert_frame_equal``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Added ``check_freq`` argument to ``testing.assert_frame_equal`` and ``testing.assert_series_equal``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-The ``check_freq`` argument was added to :func:`testing.assert_frame_equal` in pandas 1.1.0 and defaults to ``True``. :func:`testing.assert_frame_equal` now raises ``AssertionError`` if the indexes do not have the same frequency. Before pandas 1.1.0, the index frequency was not checked by :func:`testing.assert_frame_equal`.
+The ``check_freq`` argument was added to :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` in pandas 1.1.0 and defaults to ``True``. :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` now raise ``AssertionError`` if the indexes do not have the same frequency. Before pandas 1.1.0, the index frequency was not checked.
.. ---------------------------------------------------------------------------
diff --git a/pandas/_testing.py b/pandas/_testing.py
index 0b0778f3d3e5c..90840033ca099 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -1334,6 +1334,8 @@ def assert_series_equal(
.. versionadded:: 1.0.2
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
+
+ .. versionadded:: 1.1.0
check_flags : bool, default True
Whether to check the `flags` attribute.
| Backport PR #38893 | https://api.github.com/repos/pandas-dev/pandas/pulls/38942 | 2021-01-04T11:47:48Z | 2021-01-04T13:19:25Z | 2021-01-04T13:19:25Z | 2021-01-04T13:47:10Z |
DOC: minor tweaks to formatting on SQL comparison page | diff --git a/doc/source/getting_started/comparison/comparison_with_sql.rst b/doc/source/getting_started/comparison/comparison_with_sql.rst
index 52799442d6118..75d26354ddfa5 100644
--- a/doc/source/getting_started/comparison/comparison_with_sql.rst
+++ b/doc/source/getting_started/comparison/comparison_with_sql.rst
@@ -69,31 +69,31 @@ Filtering in SQL is done via a WHERE clause.
.. include:: includes/filtering.rst
-Just like SQL's OR and AND, multiple conditions can be passed to a DataFrame using | (OR) and &
-(AND).
+Just like SQL's ``OR`` and ``AND``, multiple conditions can be passed to a DataFrame using ``|``
+(``OR``) and ``&`` (``AND``).
+
+Tips of more than $5 at Dinner meals:
.. code-block:: sql
- -- tips of more than $5.00 at Dinner meals
SELECT *
FROM tips
WHERE time = 'Dinner' AND tip > 5.00;
.. ipython:: python
- # tips of more than $5.00 at Dinner meals
tips[(tips["time"] == "Dinner") & (tips["tip"] > 5.00)]
+Tips by parties of at least 5 diners OR bill total was more than $45:
+
.. code-block:: sql
- -- tips by parties of at least 5 diners OR bill total was more than $45
SELECT *
FROM tips
WHERE size >= 5 OR total_bill > 45;
.. ipython:: python
- # tips by parties of at least 5 diners OR bill total was more than $45
tips[(tips["size"] >= 5) | (tips["total_bill"] > 45)]
NULL checking is done using the :meth:`~pandas.Series.notna` and :meth:`~pandas.Series.isna`
@@ -134,7 +134,7 @@ Getting items where ``col1`` IS NOT NULL can be done with :meth:`~pandas.Series.
GROUP BY
--------
-In pandas, SQL's GROUP BY operations are performed using the similarly named
+In pandas, SQL's ``GROUP BY`` operations are performed using the similarly named
:meth:`~pandas.DataFrame.groupby` method. :meth:`~pandas.DataFrame.groupby` typically refers to a
process where we'd like to split a dataset into groups, apply some function (typically aggregation)
, and then combine the groups together.
@@ -162,7 +162,7 @@ The pandas equivalent would be:
Notice that in the pandas code we used :meth:`~pandas.core.groupby.DataFrameGroupBy.size` and not
:meth:`~pandas.core.groupby.DataFrameGroupBy.count`. This is because
:meth:`~pandas.core.groupby.DataFrameGroupBy.count` applies the function to each column, returning
-the number of ``not null`` records within each.
+the number of ``NOT NULL`` records within each.
.. ipython:: python
@@ -223,10 +223,10 @@ Grouping by more than one column is done by passing a list of columns to the
JOIN
----
-JOINs can be performed with :meth:`~pandas.DataFrame.join` or :meth:`~pandas.merge`. By default,
-:meth:`~pandas.DataFrame.join` will join the DataFrames on their indices. Each method has
-parameters allowing you to specify the type of join to perform (LEFT, RIGHT, INNER, FULL) or the
-columns to join on (column names or indices).
+``JOIN``\s can be performed with :meth:`~pandas.DataFrame.join` or :meth:`~pandas.merge`. By
+default, :meth:`~pandas.DataFrame.join` will join the DataFrames on their indices. Each method has
+parameters allowing you to specify the type of join to perform (``LEFT``, ``RIGHT``, ``INNER``,
+``FULL``) or the columns to join on (column names or indices).
.. ipython:: python
@@ -235,7 +235,7 @@ columns to join on (column names or indices).
Assume we have two database tables of the same name and structure as our DataFrames.
-Now let's go over the various types of JOINs.
+Now let's go over the various types of ``JOIN``\s.
INNER JOIN
~~~~~~~~~~
@@ -261,9 +261,11 @@ column with another DataFrame's index.
LEFT OUTER JOIN
~~~~~~~~~~~~~~~
+
+Show all records from ``df1``.
+
.. code-block:: sql
- -- show all records from df1
SELECT *
FROM df1
LEFT OUTER JOIN df2
@@ -271,14 +273,15 @@ LEFT OUTER JOIN
.. ipython:: python
- # show all records from df1
pd.merge(df1, df2, on="key", how="left")
RIGHT JOIN
~~~~~~~~~~
+
+Show all records from ``df2``.
+
.. code-block:: sql
- -- show all records from df2
SELECT *
FROM df1
RIGHT OUTER JOIN df2
@@ -286,17 +289,17 @@ RIGHT JOIN
.. ipython:: python
- # show all records from df2
pd.merge(df1, df2, on="key", how="right")
FULL JOIN
~~~~~~~~~
-pandas also allows for FULL JOINs, which display both sides of the dataset, whether or not the
-joined columns find a match. As of writing, FULL JOINs are not supported in all RDBMS (MySQL).
+pandas also allows for ``FULL JOIN``\s, which display both sides of the dataset, whether or not the
+joined columns find a match. As of writing, ``FULL JOIN``\s are not supported in all RDBMS (MySQL).
+
+Show all records from both tables.
.. code-block:: sql
- -- show all records from both tables
SELECT *
FROM df1
FULL OUTER JOIN df2
@@ -304,13 +307,13 @@ joined columns find a match. As of writing, FULL JOINs are not supported in all
.. ipython:: python
- # show all records from both frames
pd.merge(df1, df2, on="key", how="outer")
UNION
-----
-UNION ALL can be performed using :meth:`~pandas.concat`.
+
+``UNION ALL`` can be performed using :meth:`~pandas.concat`.
.. ipython:: python
@@ -342,7 +345,7 @@ UNION ALL can be performed using :meth:`~pandas.concat`.
pd.concat([df1, df2])
-SQL's UNION is similar to UNION ALL, however UNION will remove duplicate rows.
+SQL's ``UNION`` is similar to ``UNION ALL``, however ``UNION`` will remove duplicate rows.
.. code-block:: sql
@@ -444,7 +447,7 @@ the same using ``rank(method='first')`` function
Let's find tips with (rank < 3) per gender group for (tips < 2).
Notice that when using ``rank(method='min')`` function
``rnk_min`` remains the same for the same ``tip``
-(as Oracle's RANK() function)
+(as Oracle's ``RANK()`` function)
.. ipython:: python
@@ -477,7 +480,7 @@ DELETE
DELETE FROM tips
WHERE tip > 9;
-In pandas we select the rows that should remain, instead of deleting them
+In pandas we select the rows that should remain instead of deleting them:
.. ipython:: python
diff --git a/doc/source/getting_started/comparison/includes/filtering.rst b/doc/source/getting_started/comparison/includes/filtering.rst
index 861a93d92c2c2..8ddf7c0d2fa39 100644
--- a/doc/source/getting_started/comparison/includes/filtering.rst
+++ b/doc/source/getting_started/comparison/includes/filtering.rst
@@ -1,5 +1,5 @@
DataFrames can be filtered in multiple ways; the most intuitive of which is using
-:ref:`boolean indexing <indexing.boolean>`
+:ref:`boolean indexing <indexing.boolean>`.
.. ipython:: python
| Adding code formatting, missing punctuation, etc. No changes of substance.
- [ ] ~~closes #xxxx~~
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] ~~whatsnew entry~~ | https://api.github.com/repos/pandas-dev/pandas/pulls/38941 | 2021-01-04T08:41:33Z | 2021-01-04T13:33:13Z | 2021-01-04T13:33:13Z | 2021-01-04T13:33:16Z |
⬆️ UPGRADE: Autoupdate pre-commit config | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2dade8afbf91f..f5d8503041ccd 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -20,11 +20,9 @@ repos:
types: [text]
args: [--append-config=flake8/cython-template.cfg]
- repo: https://github.com/PyCQA/isort
- rev: 5.6.4
+ rev: 5.7.0
hooks:
- id: isort
- types: [text] # overwrite upstream `types: [python]`
- types_or: [python, cython]
- repo: https://github.com/asottile/pyupgrade
rev: v2.7.4
hooks:
| <!-- START pr-commits -->
<!-- END pr-commits -->
## Base PullRequest
default branch (https://github.com/pandas-dev/pandas/tree/master)
## Command results
<details>
<summary>Details: </summary>
<details>
<summary><em>add path</em></summary>
```Shell
/home/runner/work/_actions/technote-space/create-pr-action/v2/node_modules/npm-check-updates/bin
```
</details>
<details>
<summary><em>pip install pre-commit</em></summary>
```Shell
Collecting pre-commit
Downloading pre_commit-2.9.3-py2.py3-none-any.whl (184 kB)
Collecting cfgv>=2.0.0
Using cached cfgv-3.2.0-py2.py3-none-any.whl (7.3 kB)
Collecting identify>=1.0.0
Downloading identify-1.5.11-py2.py3-none-any.whl (97 kB)
Collecting nodeenv>=0.11.1
Using cached nodeenv-1.5.0-py2.py3-none-any.whl (21 kB)
Collecting pyyaml>=5.1
Using cached PyYAML-5.3.1-cp39-cp39-linux_x86_64.whl
Collecting toml
Using cached toml-0.10.2-py2.py3-none-any.whl (16 kB)
Collecting virtualenv>=20.0.8
Downloading virtualenv-20.2.2-py2.py3-none-any.whl (5.7 MB)
Collecting appdirs<2,>=1.4.3
Using cached appdirs-1.4.4-py2.py3-none-any.whl (9.6 kB)
Collecting distlib<1,>=0.3.1
Using cached distlib-0.3.1-py2.py3-none-any.whl (335 kB)
Collecting filelock<4,>=3.0.0
Using cached filelock-3.0.12-py3-none-any.whl (7.6 kB)
Collecting six<2,>=1.9.0
Using cached six-1.15.0-py2.py3-none-any.whl (10 kB)
Installing collected packages: six, filelock, distlib, appdirs, virtualenv, toml, pyyaml, nodeenv, identify, cfgv, pre-commit
Successfully installed appdirs-1.4.4 cfgv-3.2.0 distlib-0.3.1 filelock-3.0.12 identify-1.5.11 nodeenv-1.5.0 pre-commit-2.9.3 pyyaml-5.3.1 six-1.15.0 toml-0.10.2 virtualenv-20.2.2
```
### stderr:
```Shell
WARNING: You are using pip version 20.3.1; however, version 20.3.3 is available.
You should consider upgrading via the '/opt/hostedtoolcache/Python/3.9.1/x64/bin/python -m pip install --upgrade pip' command.
```
</details>
<details>
<summary><em>pre-commit autoupdate || (exit 0);</em></summary>
```Shell
Updating https://github.com/python/black ... already up to date.
Updating https://gitlab.com/pycqa/flake8 ... already up to date.
Updating https://github.com/PyCQA/isort ... [INFO] Initializing environment for https://github.com/PyCQA/isort.
updating 5.6.4 -> 5.7.0.
Updating https://github.com/asottile/pyupgrade ... [INFO] Initializing environment for https://github.com/asottile/pyupgrade.
already up to date.
Updating https://github.com/pre-commit/pygrep-hooks ... already up to date.
Updating https://github.com/asottile/yesqa ... already up to date.
Updating https://github.com/pre-commit/pre-commit-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pre-commit-hooks.
already up to date.
Updating https://github.com/codespell-project/codespell ... [INFO] Initializing environment for https://github.com/codespell-project/codespell.
already up to date.
```
</details>
<details>
<summary><em>pre-commit run -a || (exit 0);</em></summary>
```Shell
[INFO] Installing environment for https://github.com/python/black.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://gitlab.com/pycqa/flake8.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://gitlab.com/pycqa/flake8.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/PyCQA/isort.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/asottile/pyupgrade.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/asottile/yesqa.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/pre-commit/pre-commit-hooks.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/codespell-project/codespell.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
black..................................................................................................Passed
flake8.................................................................................................Passed
flake8 (cython)........................................................................................Passed
flake8 (cython template)...............................................................................Passed
isort..................................................................................................Passed
pyupgrade..............................................................................................Passed
rst ``code`` is two backticks..........................................................................Passed
rst directives end with two colons.....................................................................Passed
rst ``inline code`` next to normal text................................................................Passed
Generate pip dependency from conda.....................................................................Passed
flake8-rst.............................................................................................Passed
Check for non-standard imports.........................................................................Passed
Check for non-standard numpy.random-related imports excluding pandas/_testing.py.......................Passed
Check for non-standard imports in test suite...........................................................Passed
Check for incorrect code block or IPython directives...................................................Passed
Check for use of not concatenated strings..............................................................Passed
Check for strings with wrong placed spaces.............................................................Passed
Check for import of private attributes across modules..................................................Passed
Check for use of private functions across modules......................................................Passed
Check for use of bare pytest raises....................................................................Passed
Check for inconsistent use of pandas namespace in tests................................................Passed
Check for use of Union[Series, DataFrame] instead of FrameOrSeriesUnion alias..........................Passed
Check for use of foo.__class__ instead of type(foo)....................................................Passed
Check for use of comment-based annotation syntax and missing error codes...............................Passed
Check code for instances of os.remove..................................................................Passed
Strip unnecessary `# noqa`s............................................................................Passed
Fix End of Files.......................................................................................Passed
Trim Trailing Whitespace...............................................................................Passed
codespell..............................................................................................Passed
```
</details>
</details>
## Changed files
<details>
<summary>Changed file: </summary>
- .pre-commit-config.yaml
</details>
<hr>
[:octocat: Repo](https://github.com/technote-space/create-pr-action) | [:memo: Issues](https://github.com/technote-space/create-pr-action/issues) | [:department_store: Marketplace](https://github.com/marketplace/actions/create-pr-action) | https://api.github.com/repos/pandas-dev/pandas/pulls/38940 | 2021-01-04T07:40:09Z | 2021-01-05T09:41:28Z | 2021-01-05T09:41:28Z | 2021-01-05T09:41:33Z |
API: honor copy=True when passing dict to DataFrame | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 1e723493a4cc8..63902b53ea36d 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -110,6 +110,30 @@ both XPath 1.0 and XSLT 1.0 is available. (:issue:`27554`)
For more, see :ref:`io.xml` in the user guide on IO tools.
+.. _whatsnew_130.dataframe_honors_copy_with_dict:
+
+DataFrame constructor honors ``copy=False`` with dict
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When passing a dictionary to :class:`DataFrame` with ``copy=False``,
+a copy will no longer be made (:issue:`32960`)
+
+.. ipython:: python
+
+ arr = np.array([1, 2, 3])
+ df = pd.DataFrame({"A": arr, "B": arr.copy()}, copy=False)
+ df
+
+``df["A"]`` remains a view on ``arr``:
+
+.. ipython:: python
+
+ arr[0] = 0
+ assert df.iloc[0, 0] == 0
+
+The default behavior when not passing ``copy`` will remain unchanged, i.e.
+a copy will be made.
+
.. _whatsnew_130.enhancements.other:
Other enhancements
@@ -546,6 +570,8 @@ Conversion
- Bug in creating a :class:`DataFrame` from an empty ``np.recarray`` not retaining the original dtypes (:issue:`40121`)
- Bug in :class:`DataFrame` failing to raise ``TypeError`` when constructing from a ``frozenset`` (:issue:`40163`)
- Bug in :class:`Index` construction silently ignoring a passed ``dtype`` when the data cannot be cast to that dtype (:issue:`21311`)
+- Bug in :class:`DataFrame` construction with a dictionary containing an arraylike with ``ExtensionDtype`` and ``copy=True`` failing to make a copy (:issue:`38939`)
+-
Strings
^^^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 510bdfcb0079f..6f2edaa300c93 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -476,8 +476,12 @@ class DataFrame(NDFrame, OpsMixin):
RangeIndex (0, 1, 2, ..., n) if no column labels are provided.
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer.
- copy : bool, default False
- Copy data from inputs. Only affects DataFrame / 2d ndarray input.
+ copy : bool or None, default None
+ Copy data from inputs.
+ For dict data, the default of None behaves like ``copy=True``. For DataFrame
+ or 2d ndarray input, the default of None behaves like ``copy=False``.
+
+ .. versionchanged:: 1.3.0
See Also
--------
@@ -555,8 +559,16 @@ def __init__(
index: Optional[Axes] = None,
columns: Optional[Axes] = None,
dtype: Optional[Dtype] = None,
- copy: bool = False,
+ copy: Optional[bool] = None,
):
+
+ if copy is None:
+ if isinstance(data, dict) or data is None:
+ # retain pre-GH#38939 default behavior
+ copy = True
+ else:
+ copy = False
+
if data is None:
data = {}
if dtype is not None:
@@ -565,18 +577,13 @@ def __init__(
if isinstance(data, DataFrame):
data = data._mgr
- # first check if a Manager is passed without any other arguments
- # -> use fastpath (without checking Manager type)
- if (
- index is None
- and columns is None
- and dtype is None
- and copy is False
- and isinstance(data, (BlockManager, ArrayManager))
- ):
- # GH#33357 fastpath
- NDFrame.__init__(self, data)
- return
+ if isinstance(data, (BlockManager, ArrayManager)):
+ # first check if a Manager is passed without any other arguments
+ # -> use fastpath (without checking Manager type)
+ if index is None and columns is None and dtype is None and not copy:
+ # GH#33357 fastpath
+ NDFrame.__init__(self, data)
+ return
manager = get_option("mode.data_manager")
@@ -586,7 +593,8 @@ def __init__(
)
elif isinstance(data, dict):
- mgr = dict_to_mgr(data, index, columns, dtype=dtype, typ=manager)
+ # GH#38939 de facto copy defaults to False only in non-dict cases
+ mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 1ee38834c5758..0ecd798986c53 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1807,7 +1807,9 @@ def describe(self, **kwargs):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
- return result.unstack()
+ # FIXME: not being consolidated breaks
+ # test_describe_with_duplicate_output_column_names
+ return result._consolidate().unstack()
@final
def resample(self, rule, *args, **kwargs):
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py
index 9959174373034..5b4b710838ef8 100644
--- a/pandas/core/internals/construction.py
+++ b/pandas/core/internals/construction.py
@@ -101,9 +101,11 @@ def arrays_to_mgr(
arr_names,
index,
columns,
+ *,
dtype: Optional[DtypeObj] = None,
verify_integrity: bool = True,
typ: Optional[str] = None,
+ consolidate: bool = True,
) -> Manager:
"""
Segregate Series based on type and coerce into matrices.
@@ -131,7 +133,9 @@ def arrays_to_mgr(
axes = [columns, index]
if typ == "block":
- return create_block_manager_from_arrays(arrays, arr_names, axes)
+ return create_block_manager_from_arrays(
+ arrays, arr_names, axes, consolidate=consolidate
+ )
elif typ == "array":
if len(columns) != len(arrays):
assert len(arrays) == 0
@@ -181,7 +185,7 @@ def rec_array_to_mgr(
if columns is None:
columns = arr_columns
- mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype, typ=typ)
+ mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype=dtype, typ=typ)
if copy:
mgr = mgr.copy()
@@ -376,7 +380,13 @@ def maybe_squeeze_dt64tz(dta: ArrayLike) -> ArrayLike:
def dict_to_mgr(
- data: Dict, index, columns, dtype: Optional[DtypeObj], typ: str
+ data: Dict,
+ index,
+ columns,
+ *,
+ dtype: Optional[DtypeObj] = None,
+ typ: str = "block",
+ copy: bool = True,
) -> Manager:
"""
Segregate Series based on type and coerce into matrices.
@@ -414,6 +424,8 @@ def dict_to_mgr(
val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
arrays.loc[missing] = [val] * missing.sum()
+ arrays = list(arrays)
+
else:
keys = list(data.keys())
columns = data_names = Index(keys)
@@ -424,7 +436,21 @@ def dict_to_mgr(
arrays = [
arr if not is_datetime64tz_dtype(arr) else arr.copy() for arr in arrays
]
- return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype, typ=typ)
+
+ if copy:
+ # arrays_to_mgr (via form_blocks) won't make copies for EAs
+ # dtype attr check to exclude EADtype-castable strs
+ arrays = [
+ x
+ if not hasattr(x, "dtype") or not isinstance(x.dtype, ExtensionDtype)
+ else x.copy()
+ for x in arrays
+ ]
+ # TODO: can we get rid of the dt64tz special case above?
+
+ return arrays_to_mgr(
+ arrays, data_names, index, columns, dtype=dtype, typ=typ, consolidate=copy
+ )
def nested_data_to_arrays(
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 69338abcd7d58..6681015856d6b 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -53,7 +53,10 @@
import pandas.core.algorithms as algos
from pandas.core.arrays.sparse import SparseDtype
-from pandas.core.construction import extract_array
+from pandas.core.construction import (
+ ensure_wrapped_if_datetimelike,
+ extract_array,
+)
from pandas.core.indexers import maybe_convert_indices
from pandas.core.indexes.api import (
Float64Index,
@@ -991,6 +994,8 @@ def fast_xs(self, loc: int) -> ArrayLike:
# Any]]"
result = np.empty(n, dtype=dtype) # type: ignore[arg-type]
+ result = ensure_wrapped_if_datetimelike(result)
+
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
@@ -1693,7 +1698,7 @@ def set_values(self, values: ArrayLike):
def create_block_manager_from_blocks(
- blocks: List[Block], axes: List[Index]
+ blocks: List[Block], axes: List[Index], consolidate: bool = True
) -> BlockManager:
try:
mgr = BlockManager(blocks, axes)
@@ -1703,7 +1708,8 @@ def create_block_manager_from_blocks(
tot_items = sum(arr.shape[0] for arr in arrays)
raise construction_error(tot_items, arrays[0].shape[1:], axes, err)
- mgr._consolidate_inplace()
+ if consolidate:
+ mgr._consolidate_inplace()
return mgr
@@ -1713,7 +1719,10 @@ def _extract_array(obj):
def create_block_manager_from_arrays(
- arrays, names: Index, axes: List[Index]
+ arrays,
+ names: Index,
+ axes: List[Index],
+ consolidate: bool = True,
) -> BlockManager:
assert isinstance(names, Index)
assert isinstance(axes, list)
@@ -1722,12 +1731,13 @@ def create_block_manager_from_arrays(
arrays = [_extract_array(x) for x in arrays]
try:
- blocks = _form_blocks(arrays, names, axes)
+ blocks = _form_blocks(arrays, names, axes, consolidate)
mgr = BlockManager(blocks, axes)
- mgr._consolidate_inplace()
- return mgr
except ValueError as e:
raise construction_error(len(arrays), arrays[0].shape, axes, e)
+ if consolidate:
+ mgr._consolidate_inplace()
+ return mgr
def construction_error(
@@ -1760,7 +1770,7 @@ def construction_error(
def _form_blocks(
- arrays: List[ArrayLike], names: Index, axes: List[Index]
+ arrays: List[ArrayLike], names: Index, axes: List[Index], consolidate: bool
) -> List[Block]:
# put "leftover" items in float bucket, where else?
# generalize?
@@ -1786,15 +1796,21 @@ def _form_blocks(
blocks: List[Block] = []
if len(items_dict["NumericBlock"]):
- numeric_blocks = _multi_blockify(items_dict["NumericBlock"])
+ numeric_blocks = _multi_blockify(
+ items_dict["NumericBlock"], consolidate=consolidate
+ )
blocks.extend(numeric_blocks)
if len(items_dict["TimeDeltaBlock"]):
- timedelta_blocks = _multi_blockify(items_dict["TimeDeltaBlock"])
+ timedelta_blocks = _multi_blockify(
+ items_dict["TimeDeltaBlock"], consolidate=consolidate
+ )
blocks.extend(timedelta_blocks)
if len(items_dict["DatetimeBlock"]):
- datetime_blocks = _simple_blockify(items_dict["DatetimeBlock"], DT64NS_DTYPE)
+ datetime_blocks = _simple_blockify(
+ items_dict["DatetimeBlock"], DT64NS_DTYPE, consolidate=consolidate
+ )
blocks.extend(datetime_blocks)
if len(items_dict["DatetimeTZBlock"]):
@@ -1805,7 +1821,9 @@ def _form_blocks(
blocks.extend(dttz_blocks)
if len(items_dict["ObjectBlock"]) > 0:
- object_blocks = _simple_blockify(items_dict["ObjectBlock"], np.object_)
+ object_blocks = _simple_blockify(
+ items_dict["ObjectBlock"], np.object_, consolidate=consolidate
+ )
blocks.extend(object_blocks)
if len(items_dict["CategoricalBlock"]) > 0:
@@ -1844,11 +1862,14 @@ def _form_blocks(
return blocks
-def _simple_blockify(tuples, dtype) -> List[Block]:
+def _simple_blockify(tuples, dtype, consolidate: bool) -> List[Block]:
"""
return a single array of a block that has a single dtype; if dtype is
not None, coerce to this dtype
"""
+ if not consolidate:
+ return _tuples_to_blocks_no_consolidate(tuples, dtype=dtype)
+
values, placement = _stack_arrays(tuples, dtype)
# TODO: CHECK DTYPE?
@@ -1859,8 +1880,12 @@ def _simple_blockify(tuples, dtype) -> List[Block]:
return [block]
-def _multi_blockify(tuples, dtype: Optional[Dtype] = None):
+def _multi_blockify(tuples, dtype: Optional[DtypeObj] = None, consolidate: bool = True):
""" return an array of blocks that potentially have different dtypes """
+
+ if not consolidate:
+ return _tuples_to_blocks_no_consolidate(tuples, dtype=dtype)
+
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[1].dtype)
@@ -1880,6 +1905,18 @@ def _multi_blockify(tuples, dtype: Optional[Dtype] = None):
return new_blocks
+def _tuples_to_blocks_no_consolidate(tuples, dtype: Optional[DtypeObj]) -> List[Block]:
+ # tuples produced within _form_blocks are of the form (placement, whatever, array)
+ if dtype is not None:
+ return [
+ new_block(
+ np.atleast_2d(x[1].astype(dtype, copy=False)), placement=x[0], ndim=2
+ )
+ for x in tuples
+ ]
+ return [new_block(np.atleast_2d(x[1]), placement=x[0], ndim=2) for x in tuples]
+
+
def _stack_arrays(tuples, dtype: np.dtype):
placement, arrays = zip(*tuples)
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index 1e2622d6a8fcd..ef86a8e6a1cb0 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -538,7 +538,6 @@ def test_df_div_zero_series_does_not_commute(self):
def test_df_mod_zero_df(self, using_array_manager):
# GH#3590, modulo as ints
df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})
-
# this is technically wrong, as the integer portion is coerced to float
first = Series([0, 0, 0, 0])
if not using_array_manager:
@@ -551,6 +550,15 @@ def test_df_mod_zero_df(self, using_array_manager):
result = df % df
tm.assert_frame_equal(result, expected)
+ # GH#38939 If we dont pass copy=False, df is consolidated and
+ # result["first"] is float64 instead of int64
+ df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}, copy=False)
+ first = Series([0, 0, 0, 0], dtype="int64")
+ second = Series([np.nan, np.nan, np.nan, 0])
+ expected = pd.DataFrame({"first": first, "second": second})
+ result = df % df
+ tm.assert_frame_equal(result, expected)
+
def test_df_mod_zero_array(self):
# GH#3590, modulo as ints
df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 366b24e328642..68dbdd9e0bf35 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -150,7 +150,7 @@ def take(self, indexer, allow_fill=False, fill_value=None):
return self._from_sequence(result)
def copy(self):
- return type(self)(self._data.copy())
+ return type(self)(self._data.copy(), dtype=self.dtype)
def astype(self, dtype, copy=True):
if is_dtype_equal(dtype, self._dtype):
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 437160e78741b..55f9d85574f94 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -261,7 +261,18 @@ def test_dataframe_constructor_with_dtype():
tm.assert_frame_equal(result, expected)
-@pytest.mark.parametrize("frame", [True, False])
+@pytest.mark.parametrize(
+ "frame",
+ [
+ pytest.param(
+ True,
+ marks=pytest.mark.xfail(
+ reason="pd.concat call inside NDFrame.astype reverts the dtype"
+ ),
+ ),
+ False,
+ ],
+)
def test_astype_dispatches(frame):
# This is a dtype-specific test that ensures Series[decimal].astype
# gets all the way through to ExtensionArray.astype
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index 0613c727dec98..759277a47f62b 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -285,7 +285,7 @@ def test_combine_le(self, data_repeated):
def test_fillna_copy_frame(self, data_missing):
arr = data_missing.take([1, 1])
- df = pd.DataFrame({"A": arr})
+ df = pd.DataFrame({"A": arr}, copy=False)
filled_val = df.iloc[0, 0]
result = df.fillna(filled_val)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index b76a44b3c86be..d618c4cda4f13 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -1997,7 +1997,7 @@ def test_constructor_ndarray_copy(self, float_frame):
def test_constructor_series_copy(self, float_frame):
series = float_frame._series
- df = DataFrame({"A": series["A"]})
+ df = DataFrame({"A": series["A"]}, copy=True)
df["A"][:] = 5
assert not (series["A"] == 5).all()
@@ -2311,6 +2311,86 @@ def test_constructor_list_str_na(self, string_dtype):
expected = DataFrame({"A": ["1.0", "2.0", None]}, dtype=object)
tm.assert_frame_equal(result, expected)
+ @pytest.mark.parametrize("copy", [False, True])
+ @td.skip_array_manager_not_yet_implemented
+ def test_dict_nocopy(self, copy, any_nullable_numeric_dtype, any_numpy_dtype):
+ a = np.array([1, 2], dtype=any_numpy_dtype)
+ b = np.array([3, 4], dtype=any_numpy_dtype)
+ if b.dtype.kind in ["S", "U"]:
+ # These get cast, making the checks below more cumbersome
+ return
+
+ c = pd.array([1, 2], dtype=any_nullable_numeric_dtype)
+ df = DataFrame({"a": a, "b": b, "c": c}, copy=copy)
+
+ def get_base(obj):
+ if isinstance(obj, np.ndarray):
+ return obj.base
+ elif isinstance(obj.dtype, np.dtype):
+ # i.e. DatetimeArray, TimedeltaArray
+ return obj._ndarray.base
+ else:
+ raise TypeError
+
+ def check_views():
+ # written to work for either BlockManager or ArrayManager
+ assert sum(x is c for x in df._mgr.arrays) == 1
+ assert (
+ sum(
+ get_base(x) is a
+ for x in df._mgr.arrays
+ if isinstance(x.dtype, np.dtype)
+ )
+ == 1
+ )
+ assert (
+ sum(
+ get_base(x) is b
+ for x in df._mgr.arrays
+ if isinstance(x.dtype, np.dtype)
+ )
+ == 1
+ )
+
+ if not copy:
+ # constructor preserves views
+ check_views()
+
+ df.iloc[0, 0] = 0
+ df.iloc[0, 1] = 0
+ if not copy:
+ # Check that the underlying data behind df["c"] is still `c`
+ # after setting with iloc. Since we don't know which entry in
+ # df._mgr.arrays corresponds to df["c"], we just check that exactly
+ # one of these arrays is `c`. GH#38939
+ assert sum(x is c for x in df._mgr.arrays) == 1
+ # TODO: we can call check_views if we stop consolidating
+ # in setitem_with_indexer
+
+ # FIXME: until GH#35417, iloc.setitem into EA values does not preserve
+ # view, so we have to check in the other direction
+ # df.iloc[0, 2] = 0
+ # if not copy:
+ # check_views()
+ c[0] = 0
+
+ if copy:
+ if a.dtype.kind == "M":
+ assert a[0] == a.dtype.type(1, "ns")
+ assert b[0] == b.dtype.type(3, "ns")
+ else:
+ assert a[0] == a.dtype.type(1)
+ assert b[0] == b.dtype.type(3)
+ # FIXME: enable after GH#35417
+ # assert c[0] == 1
+ assert df.iloc[0, 2] == 1
+ else:
+ # TODO: we can call check_views if we stop consolidating
+ # in setitem_with_indexer
+ # FIXME: enable after GH#35417
+ # assert b[0] == 0
+ assert df.iloc[0, 2] == 0
+
class TestDataFrameConstructorWithDatetimeTZ:
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 85accac5a8235..ae07fc6e3b2b3 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1063,6 +1063,7 @@ def test_loc_setitem_empty_append_raises(self):
[
"cannot copy sequence with size 2 to array axis with dimension 0",
r"could not broadcast input array from shape \(2,\) into shape \(0,\)",
+ "Must have equal len keys and value when setting with an iterable",
]
)
with pytest.raises(ValueError, match=msg):
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index b0d41a89931e9..b8680cc4e611e 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -171,7 +171,8 @@ def test_partial_setting_mixed_dtype(self):
tm.assert_frame_equal(df, DataFrame(columns=["A", "B"], index=[0]))
# columns will align
- df = DataFrame(columns=["A", "B"])
+ # TODO: it isn't great that this behavior depends on consolidation
+ df = DataFrame(columns=["A", "B"])._consolidate()
df.loc[0] = Series(1, index=["B"])
exp = DataFrame([[np.nan, 1]], columns=["A", "B"], index=[0], dtype="float64")
| - [x] closes #32960
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
xref #34872 cc @TomAugspurger used the test_dict_nocopy you wrote but it ended up pretty mangled | https://api.github.com/repos/pandas-dev/pandas/pulls/38939 | 2021-01-04T06:26:44Z | 2021-03-31T01:03:22Z | 2021-03-31T01:03:21Z | 2021-03-31T01:05:17Z |
DOC: remove use of head() in the comparison docs | diff --git a/doc/source/getting_started/comparison/comparison_with_sas.rst b/doc/source/getting_started/comparison/comparison_with_sas.rst
index b97efe31b8b29..2b316cccb7fc9 100644
--- a/doc/source/getting_started/comparison/comparison_with_sas.rst
+++ b/doc/source/getting_started/comparison/comparison_with_sas.rst
@@ -4,23 +4,13 @@
Comparison with SAS
********************
+
For potential users coming from `SAS <https://en.wikipedia.org/wiki/SAS_(software)>`__
this page is meant to demonstrate how different SAS operations would be
performed in pandas.
.. include:: includes/introduction.rst
-.. note::
-
- Throughout this tutorial, the pandas ``DataFrame`` will be displayed by calling
- ``df.head()``, which displays the first N (default 5) rows of the ``DataFrame``.
- This is often used in interactive work (e.g. `Jupyter notebook
- <https://jupyter.org/>`_ or terminal) - the equivalent in SAS would be:
-
- .. code-block:: sas
-
- proc print data=df(obs=5);
- run;
Data structures
---------------
@@ -120,7 +110,7 @@ The pandas method is :func:`read_csv`, which works similarly.
"pandas/master/pandas/tests/io/data/csv/tips.csv"
)
tips = pd.read_csv(url)
- tips.head()
+ tips
Like ``PROC IMPORT``, ``read_csv`` can take a number of parameters to specify
@@ -138,6 +128,19 @@ In addition to text/csv, pandas supports a variety of other data formats
such as Excel, HDF5, and SQL databases. These are all read via a ``pd.read_*``
function. See the :ref:`IO documentation<io>` for more details.
+Limiting output
+~~~~~~~~~~~~~~~
+
+.. include:: includes/limit.rst
+
+The equivalent in SAS would be:
+
+.. code-block:: sas
+
+ proc print data=df(obs=5);
+ run;
+
+
Exporting data
~~~~~~~~~~~~~~
@@ -173,20 +176,8 @@ be used on new or existing columns.
new_bill = total_bill / 2;
run;
-pandas provides similar vectorized operations by
-specifying the individual ``Series`` in the ``DataFrame``.
-New columns can be assigned in the same way.
+.. include:: includes/column_operations.rst
-.. ipython:: python
-
- tips["total_bill"] = tips["total_bill"] - 2
- tips["new_bill"] = tips["total_bill"] / 2.0
- tips.head()
-
-.. ipython:: python
- :suppress:
-
- tips = tips.drop("new_bill", axis=1)
Filtering
~~~~~~~~~
@@ -278,18 +269,7 @@ drop, and rename columns.
rename total_bill=total_bill_2;
run;
-The same operations are expressed in pandas below.
-
-.. ipython:: python
-
- # keep
- tips[["sex", "total_bill", "tip"]].head()
-
- # drop
- tips.drop("sex", axis=1).head()
-
- # rename
- tips.rename(columns={"total_bill": "total_bill_2"}).head()
+.. include:: includes/column_selection.rst
Sorting by values
@@ -442,6 +422,8 @@ input frames.
Missing data
------------
+Both pandas and SAS have a representation for missing data.
+
.. include:: includes/missing_intro.rst
One difference is that missing data cannot be compared to its sentinel value.
diff --git a/doc/source/getting_started/comparison/comparison_with_sql.rst b/doc/source/getting_started/comparison/comparison_with_sql.rst
index 52799442d6118..685aea6334556 100644
--- a/doc/source/getting_started/comparison/comparison_with_sql.rst
+++ b/doc/source/getting_started/comparison/comparison_with_sql.rst
@@ -21,7 +21,7 @@ structure.
"/pandas/master/pandas/tests/io/data/csv/tips.csv"
)
tips = pd.read_csv(url)
- tips.head()
+ tips
SELECT
------
@@ -31,14 +31,13 @@ to select all columns):
.. code-block:: sql
SELECT total_bill, tip, smoker, time
- FROM tips
- LIMIT 5;
+ FROM tips;
With pandas, column selection is done by passing a list of column names to your DataFrame:
.. ipython:: python
- tips[["total_bill", "tip", "smoker", "time"]].head(5)
+ tips[["total_bill", "tip", "smoker", "time"]]
Calling the DataFrame without the list of column names would display all columns (akin to SQL's
``*``).
@@ -48,14 +47,13 @@ In SQL, you can add a calculated column:
.. code-block:: sql
SELECT *, tip/total_bill as tip_rate
- FROM tips
- LIMIT 5;
+ FROM tips;
With pandas, you can use the :meth:`DataFrame.assign` method of a DataFrame to append a new column:
.. ipython:: python
- tips.assign(tip_rate=tips["tip"] / tips["total_bill"]).head(5)
+ tips.assign(tip_rate=tips["tip"] / tips["total_bill"])
WHERE
-----
@@ -368,6 +366,20 @@ In pandas, you can use :meth:`~pandas.concat` in conjunction with
pd.concat([df1, df2]).drop_duplicates()
+
+LIMIT
+-----
+
+.. code-block:: sql
+
+ SELECT * FROM tips
+ LIMIT 10;
+
+.. ipython:: python
+
+ tips.head(10)
+
+
pandas equivalents for some SQL analytic and aggregate functions
----------------------------------------------------------------
diff --git a/doc/source/getting_started/comparison/comparison_with_stata.rst b/doc/source/getting_started/comparison/comparison_with_stata.rst
index ca536e7273870..43cb775b5461d 100644
--- a/doc/source/getting_started/comparison/comparison_with_stata.rst
+++ b/doc/source/getting_started/comparison/comparison_with_stata.rst
@@ -10,16 +10,6 @@ performed in pandas.
.. include:: includes/introduction.rst
-.. note::
-
- Throughout this tutorial, the pandas ``DataFrame`` will be displayed by calling
- ``df.head()``, which displays the first N (default 5) rows of the ``DataFrame``.
- This is often used in interactive work (e.g. `Jupyter notebook
- <https://jupyter.org/>`_ or terminal) -- the equivalent in Stata would be:
-
- .. code-block:: stata
-
- list in 1/5
Data structures
---------------
@@ -116,7 +106,7 @@ the data set if presented with a url.
"/pandas/master/pandas/tests/io/data/csv/tips.csv"
)
tips = pd.read_csv(url)
- tips.head()
+ tips
Like ``import delimited``, :func:`read_csv` can take a number of parameters to specify
how the data should be parsed. For example, if the data were instead tab delimited,
@@ -141,6 +131,18 @@ such as Excel, SAS, HDF5, Parquet, and SQL databases. These are all read via a
function. See the :ref:`IO documentation<io>` for more details.
+Limiting output
+~~~~~~~~~~~~~~~
+
+.. include:: includes/limit.rst
+
+The equivalent in Stata would be:
+
+.. code-block:: stata
+
+ list in 1/5
+
+
Exporting data
~~~~~~~~~~~~~~
@@ -179,18 +181,8 @@ the column from the data set.
generate new_bill = total_bill / 2
drop new_bill
-pandas provides similar vectorized operations by
-specifying the individual ``Series`` in the ``DataFrame``.
-New columns can be assigned in the same way. The :meth:`DataFrame.drop` method
-drops a column from the ``DataFrame``.
+.. include:: includes/column_operations.rst
-.. ipython:: python
-
- tips["total_bill"] = tips["total_bill"] - 2
- tips["new_bill"] = tips["total_bill"] / 2
- tips.head()
-
- tips = tips.drop("new_bill", axis=1)
Filtering
~~~~~~~~~
@@ -256,20 +248,7 @@ Stata provides keywords to select, drop, and rename columns.
rename total_bill total_bill_2
-The same operations are expressed in pandas below. Note that in contrast to Stata, these
-operations do not happen in place. To make these changes persist, assign the operation back
-to a variable.
-
-.. ipython:: python
-
- # keep
- tips[["sex", "total_bill", "tip"]].head()
-
- # drop
- tips.drop("sex", axis=1).head()
-
- # rename
- tips.rename(columns={"total_bill": "total_bill_2"}).head()
+.. include:: includes/column_selection.rst
Sorting by values
@@ -428,12 +407,14 @@ or the intersection of the two by using the values created in the
restore
merge 1:n key using df2.dta
-.. include:: includes/merge_setup.rst
+.. include:: includes/merge.rst
Missing data
------------
+Both pandas and Stata have a representation for missing data.
+
.. include:: includes/missing_intro.rst
One difference is that missing data cannot be compared to its sentinel value.
diff --git a/doc/source/getting_started/comparison/includes/column_operations.rst b/doc/source/getting_started/comparison/includes/column_operations.rst
new file mode 100644
index 0000000000000..bc5db8e6b8038
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/column_operations.rst
@@ -0,0 +1,11 @@
+pandas provides similar vectorized operations by specifying the individual ``Series`` in the
+``DataFrame``. New columns can be assigned in the same way. The :meth:`DataFrame.drop` method drops
+a column from the ``DataFrame``.
+
+.. ipython:: python
+
+ tips["total_bill"] = tips["total_bill"] - 2
+ tips["new_bill"] = tips["total_bill"] / 2
+ tips
+
+ tips = tips.drop("new_bill", axis=1)
diff --git a/doc/source/getting_started/comparison/includes/column_selection.rst b/doc/source/getting_started/comparison/includes/column_selection.rst
new file mode 100644
index 0000000000000..b925af1294f54
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/column_selection.rst
@@ -0,0 +1,23 @@
+The same operations are expressed in pandas below. Note that these operations do not happen in
+place. To make these changes persist, assign the operation back to a variable.
+
+Keep certain columns
+''''''''''''''''''''
+
+.. ipython:: python
+
+ tips[["sex", "total_bill", "tip"]]
+
+Drop a column
+'''''''''''''
+
+.. ipython:: python
+
+ tips.drop("sex", axis=1)
+
+Rename a column
+'''''''''''''''
+
+.. ipython:: python
+
+ tips.rename(columns={"total_bill": "total_bill_2"})
diff --git a/doc/source/getting_started/comparison/includes/extract_substring.rst b/doc/source/getting_started/comparison/includes/extract_substring.rst
index 78eee286ad467..1ba0dfac2317a 100644
--- a/doc/source/getting_started/comparison/includes/extract_substring.rst
+++ b/doc/source/getting_started/comparison/includes/extract_substring.rst
@@ -4,4 +4,4 @@ indexes are zero-based.
.. ipython:: python
- tips["sex"].str[0:1].head()
+ tips["sex"].str[0:1]
diff --git a/doc/source/getting_started/comparison/includes/find_substring.rst b/doc/source/getting_started/comparison/includes/find_substring.rst
index ee940b64f5cae..42543d05a0014 100644
--- a/doc/source/getting_started/comparison/includes/find_substring.rst
+++ b/doc/source/getting_started/comparison/includes/find_substring.rst
@@ -5,4 +5,4 @@ zero-based.
.. ipython:: python
- tips["sex"].str.find("ale").head()
+ tips["sex"].str.find("ale")
diff --git a/doc/source/getting_started/comparison/includes/groupby.rst b/doc/source/getting_started/comparison/includes/groupby.rst
index caa9f6ec9c9b8..93d5d51e3fb00 100644
--- a/doc/source/getting_started/comparison/includes/groupby.rst
+++ b/doc/source/getting_started/comparison/includes/groupby.rst
@@ -4,4 +4,4 @@ pandas provides a flexible ``groupby`` mechanism that allows similar aggregation
.. ipython:: python
tips_summed = tips.groupby(["sex", "smoker"])[["total_bill", "tip"]].sum()
- tips_summed.head()
+ tips_summed
diff --git a/doc/source/getting_started/comparison/includes/if_then.rst b/doc/source/getting_started/comparison/includes/if_then.rst
index d7977366cfc33..f94e7588827f5 100644
--- a/doc/source/getting_started/comparison/includes/if_then.rst
+++ b/doc/source/getting_started/comparison/includes/if_then.rst
@@ -4,7 +4,7 @@ the ``where`` method from ``numpy``.
.. ipython:: python
tips["bucket"] = np.where(tips["total_bill"] < 10, "low", "high")
- tips.head()
+ tips
.. ipython:: python
:suppress:
diff --git a/doc/source/getting_started/comparison/includes/length.rst b/doc/source/getting_started/comparison/includes/length.rst
index 5a0c803e9eff2..9141fd4ea582a 100644
--- a/doc/source/getting_started/comparison/includes/length.rst
+++ b/doc/source/getting_started/comparison/includes/length.rst
@@ -4,5 +4,5 @@ Use ``len`` and ``rstrip`` to exclude trailing blanks.
.. ipython:: python
- tips["time"].str.len().head()
- tips["time"].str.rstrip().str.len().head()
+ tips["time"].str.len()
+ tips["time"].str.rstrip().str.len()
diff --git a/doc/source/getting_started/comparison/includes/limit.rst b/doc/source/getting_started/comparison/includes/limit.rst
new file mode 100644
index 0000000000000..4efeb4e43d07c
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/limit.rst
@@ -0,0 +1,7 @@
+By default, pandas will truncate output of large ``DataFrame``\s to show the first and last rows.
+This can be overridden by :ref:`changing the pandas options <options>`, or using
+:meth:`DataFrame.head` or :meth:`DataFrame.tail`.
+
+.. ipython:: python
+
+ tips.head(5)
diff --git a/doc/source/getting_started/comparison/includes/missing.rst b/doc/source/getting_started/comparison/includes/missing.rst
index 8e6ba95e98036..341c7d5498d82 100644
--- a/doc/source/getting_started/comparison/includes/missing.rst
+++ b/doc/source/getting_started/comparison/includes/missing.rst
@@ -1,24 +1,31 @@
-This doesn't work in pandas. Instead, the :func:`pd.isna` or :func:`pd.notna` functions
-should be used for comparisons.
+In pandas, :meth:`Series.isna` and :meth:`Series.notna` can be used to filter the rows.
.. ipython:: python
- outer_join[pd.isna(outer_join["value_x"])]
- outer_join[pd.notna(outer_join["value_x"])]
+ outer_join[outer_join["value_x"].isna()]
+ outer_join[outer_join["value_x"].notna()]
-pandas also provides a variety of methods to work with missing data -- some of
-which would be challenging to express in Stata. For example, there are methods to
-drop all rows with any missing values, replacing missing values with a specified
-value, like the mean, or forward filling from previous rows. See the
-:ref:`missing data documentation<missing_data>` for more.
+pandas provides :ref:`a variety of methods to work with missing data <missing_data>`. Here are some examples:
+
+Drop rows with missing values
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. ipython:: python
- # Drop rows with any missing value
outer_join.dropna()
- # Fill forwards
+Forward fill from previous rows
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. ipython:: python
+
outer_join.fillna(method="ffill")
- # Impute missing values with the mean
+Replace missing values with a specified value
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Using the mean:
+
+.. ipython:: python
+
outer_join["value_x"].fillna(outer_join["value_x"].mean())
diff --git a/doc/source/getting_started/comparison/includes/missing_intro.rst b/doc/source/getting_started/comparison/includes/missing_intro.rst
index ed97f639f3f3d..366aa43d1264c 100644
--- a/doc/source/getting_started/comparison/includes/missing_intro.rst
+++ b/doc/source/getting_started/comparison/includes/missing_intro.rst
@@ -1,6 +1,6 @@
-Both have a representation for missing data — pandas' is the special float value ``NaN`` (not a
-number). Many of the semantics are the same; for example missing data propagates through numeric
-operations, and is ignored by default for aggregations.
+pandas represents missing data with the special float value ``NaN`` (not a number). Many of the
+semantics are the same; for example missing data propagates through numeric operations, and is
+ignored by default for aggregations.
.. ipython:: python
diff --git a/doc/source/getting_started/comparison/includes/sorting.rst b/doc/source/getting_started/comparison/includes/sorting.rst
index 0840c9dd554b7..4e2e40a18adbd 100644
--- a/doc/source/getting_started/comparison/includes/sorting.rst
+++ b/doc/source/getting_started/comparison/includes/sorting.rst
@@ -3,4 +3,4 @@ pandas has a :meth:`DataFrame.sort_values` method, which takes a list of columns
.. ipython:: python
tips = tips.sort_values(["sex", "total_bill"])
- tips.head()
+ tips
diff --git a/doc/source/getting_started/comparison/includes/time_date.rst b/doc/source/getting_started/comparison/includes/time_date.rst
index 12a00b36dc97d..fb9ee2e216cd7 100644
--- a/doc/source/getting_started/comparison/includes/time_date.rst
+++ b/doc/source/getting_started/comparison/includes/time_date.rst
@@ -11,7 +11,7 @@
tips[
["date1", "date2", "date1_year", "date2_month", "date1_next", "months_between"]
- ].head()
+ ]
.. ipython:: python
:suppress:
diff --git a/doc/source/getting_started/comparison/includes/transform.rst b/doc/source/getting_started/comparison/includes/transform.rst
index 0aa5b5b298cf7..b7599471432ad 100644
--- a/doc/source/getting_started/comparison/includes/transform.rst
+++ b/doc/source/getting_started/comparison/includes/transform.rst
@@ -5,4 +5,4 @@ succinctly expressed in one operation.
gb = tips.groupby("smoker")["total_bill"]
tips["adj_total_bill"] = tips["total_bill"] - gb.transform("mean")
- tips.head()
+ tips
| This helps to clarify the examples by removing code that isn't relevant. Added a dedicated section to the SAS, SQL, and Stata pages.
This builds on https://github.com/pandas-dev/pandas/pull/38933; ~~will rebase and mark as ready for review once that's merged. In the meantime, the last commit is the one that can be reviewed.~~ Thanks!
- [ ] ~~closes #xxxx~~
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] ~~whatsnew entry~~
| https://api.github.com/repos/pandas-dev/pandas/pulls/38935 | 2021-01-04T03:43:50Z | 2021-01-04T13:21:16Z | 2021-01-04T13:21:16Z | 2021-01-04T13:21:20Z |
ENH: Improve numerical stability for groupby.mean and groupby.cumsum | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index af11b6543a74b..b4b98ec0403a8 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -294,6 +294,7 @@ Groupby/resample/rolling
- Bug in :meth:`SeriesGroupBy.value_counts` where unobserved categories in a grouped categorical series were not tallied (:issue:`38672`)
- Bug in :meth:`.GroupBy.indices` would contain non-existent indices when null values were present in the groupby keys (:issue:`9304`)
- Fixed bug in :meth:`DataFrameGroupBy.sum` and :meth:`SeriesGroupBy.sum` causing loss of precision through using Kahan summation (:issue:`38778`)
+- Fixed bug in :meth:`DataFrameGroupBy.cumsum`, :meth:`SeriesGroupBy.cumsum`, :meth:`DataFrameGroupBy.mean` and :meth:`SeriesGroupBy.mean` causing loss of precision through using Kahan summation (:issue:`38934`)
Reshaping
^^^^^^^^^
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index ac8f22263f787..553ecbc58e745 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -246,12 +246,13 @@ def group_cumsum(numeric[:, :] out,
"""
cdef:
Py_ssize_t i, j, N, K, size
- numeric val
- numeric[:, :] accum
+ numeric val, y, t
+ numeric[:, :] accum, compensation
int64_t lab
N, K = (<object>values).shape
accum = np.zeros((ngroups, K), dtype=np.asarray(values).dtype)
+ compensation = np.zeros((ngroups, K), dtype=np.asarray(values).dtype)
with nogil:
for i in range(N):
@@ -264,7 +265,10 @@ def group_cumsum(numeric[:, :] out,
if numeric == float32_t or numeric == float64_t:
if val == val:
- accum[lab, j] += val
+ y = val - compensation[lab, j]
+ t = accum[lab, j] + y
+ compensation[lab, j] = t - accum[lab, j] - y
+ accum[lab, j] = t
out[i, j] = accum[lab, j]
else:
out[i, j] = NaN
@@ -272,7 +276,10 @@ def group_cumsum(numeric[:, :] out,
accum[lab, j] = NaN
break
else:
- accum[lab, j] += val
+ y = val - compensation[lab, j]
+ t = accum[lab, j] + y
+ compensation[lab, j] = t - accum[lab, j] - y
+ accum[lab, j] = t
out[i, j] = accum[lab, j]
@@ -637,8 +644,8 @@ def _group_mean(floating[:, :] out,
Py_ssize_t min_count=-1):
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- floating val, count
- floating[:, :] sumx
+ floating val, count, y, t
+ floating[:, :] sumx, compensation
int64_t[:, :] nobs
Py_ssize_t len_values = len(values), len_labels = len(labels)
@@ -649,6 +656,7 @@ def _group_mean(floating[:, :] out,
nobs = np.zeros((<object>out).shape, dtype=np.int64)
sumx = np.zeros_like(out)
+ compensation = np.zeros_like(out)
N, K = (<object>values).shape
@@ -664,7 +672,10 @@ def _group_mean(floating[:, :] out,
# not nan
if val == val:
nobs[lab, j] += 1
- sumx[lab, j] += val
+ y = val - compensation[lab, j]
+ t = sumx[lab, j] + y
+ compensation[lab, j] = t - sumx[lab, j] - y
+ sumx[lab, j] = t
for i in range(ncounts):
for j in range(K):
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index e1c63448a2d22..5735f895e33b6 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -2178,12 +2178,26 @@ def test_groupby_series_with_tuple_name():
@pytest.mark.xfail(not IS64, reason="GH#38778: fail on 32-bit system")
-def test_groupby_numerical_stability_sum():
+@pytest.mark.parametrize(
+ "func, values", [("sum", [97.0, 98.0]), ("mean", [24.25, 24.5])]
+)
+def test_groupby_numerical_stability_sum_mean(func, values):
# GH#38778
data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15]
df = DataFrame({"group": [1, 2] * 4, "a": data, "b": data})
- result = df.groupby("group").sum()
- expected = DataFrame(
- {"a": [97.0, 98.0], "b": [97.0, 98.0]}, index=Index([1, 2], name="group")
- )
+ result = getattr(df.groupby("group"), func)()
+ expected = DataFrame({"a": values, "b": values}, index=Index([1, 2], name="group"))
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.xfail(not IS64, reason="GH#38778: fail on 32-bit system")
+def test_groupby_numerical_stability_cumsum():
+ # GH#38934
+ data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15]
+ df = DataFrame({"group": [1, 2] * 4, "a": data, "b": data})
+ result = df.groupby("group").cumsum()
+ exp_data = (
+ [1e16] * 2 + [1e16 + 96, 1e16 + 98] + [5e15 + 97, 5e15 + 98] + [97.0, 98.0]
+ )
+ expected = DataFrame({"a": exp_data, "b": exp_data})
+ tm.assert_frame_equal(result, expected, check_exact=True)
| - [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38934 | 2021-01-04T03:34:55Z | 2021-01-04T13:25:06Z | 2021-01-04T13:25:06Z | 2021-01-04T13:26:44Z |
DOC: improve shared content between comparison pages | diff --git a/doc/source/getting_started/comparison/comparison_with_sas.rst b/doc/source/getting_started/comparison/comparison_with_sas.rst
index eb11b75027909..b97efe31b8b29 100644
--- a/doc/source/getting_started/comparison/comparison_with_sas.rst
+++ b/doc/source/getting_started/comparison/comparison_with_sas.rst
@@ -308,8 +308,8 @@ Sorting in SAS is accomplished via ``PROC SORT``
String processing
-----------------
-Length
-~~~~~~
+Finding length of string
+~~~~~~~~~~~~~~~~~~~~~~~~
SAS determines the length of a character string with the
`LENGTHN <https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a002284668.htm>`__
@@ -327,8 +327,8 @@ functions. ``LENGTHN`` excludes trailing blanks and ``LENGTHC`` includes trailin
.. include:: includes/length.rst
-Find
-~~~~
+Finding position of substring
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SAS determines the position of a character in a string with the
`FINDW <https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a002978282.htm>`__ function.
@@ -342,19 +342,11 @@ you supply as the second argument.
put(FINDW(sex,'ale'));
run;
-Python determines the position of a character in a string with the
-``find`` function. ``find`` searches for the first position of the
-substring. If the substring is found, the function returns its
-position. Keep in mind that Python indexes are zero-based and
-the function will return -1 if it fails to find the substring.
-
-.. ipython:: python
-
- tips["sex"].str.find("ale").head()
+.. include:: includes/find_substring.rst
-Substring
-~~~~~~~~~
+Extracting substring by position
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SAS extracts a substring from a string based on its position with the
`SUBSTR <https://www2.sas.com/proceedings/sugi25/25/cc/25p088.pdf>`__ function.
@@ -366,17 +358,11 @@ SAS extracts a substring from a string based on its position with the
put(substr(sex,1,1));
run;
-With pandas you can use ``[]`` notation to extract a substring
-from a string by position locations. Keep in mind that Python
-indexes are zero-based.
+.. include:: includes/extract_substring.rst
-.. ipython:: python
- tips["sex"].str[0:1].head()
-
-
-Scan
-~~~~
+Extracting nth word
+~~~~~~~~~~~~~~~~~~~
The SAS `SCAN <https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a000214639.htm>`__
function returns the nth word from a string. The first argument is the string you want to parse and the
@@ -394,20 +380,11 @@ second argument specifies which word you want to extract.
;;;
run;
-Python extracts a substring from a string based on its text
-by using regular expressions. There are much more powerful
-approaches, but this just shows a simple approach.
-
-.. ipython:: python
-
- firstlast = pd.DataFrame({"String": ["John Smith", "Jane Cook"]})
- firstlast["First_Name"] = firstlast["String"].str.split(" ", expand=True)[0]
- firstlast["Last_Name"] = firstlast["String"].str.rsplit(" ", expand=True)[0]
- firstlast
+.. include:: includes/nth_word.rst
-Upcase, lowcase, and propcase
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Changing case
+~~~~~~~~~~~~~
The SAS `UPCASE <https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a000245965.htm>`__
`LOWCASE <https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a000245912.htm>`__ and
@@ -427,27 +404,13 @@ functions change the case of the argument.
;;;
run;
-The equivalent Python functions are ``upper``, ``lower``, and ``title``.
+.. include:: includes/case.rst
-.. ipython:: python
-
- firstlast = pd.DataFrame({"String": ["John Smith", "Jane Cook"]})
- firstlast["string_up"] = firstlast["String"].str.upper()
- firstlast["string_low"] = firstlast["String"].str.lower()
- firstlast["string_prop"] = firstlast["String"].str.title()
- firstlast
Merging
-------
-The following tables will be used in the merge examples
-
-.. ipython:: python
-
- df1 = pd.DataFrame({"key": ["A", "B", "C", "D"], "value": np.random.randn(4)})
- df1
- df2 = pd.DataFrame({"key": ["B", "D", "D", "E"], "value": np.random.randn(4)})
- df2
+.. include:: includes/merge_setup.rst
In SAS, data must be explicitly sorted before merging. Different
types of joins are accomplished using the ``in=`` dummy
@@ -473,39 +436,13 @@ input frames.
if a or b then output outer_join;
run;
-pandas DataFrames have a :meth:`~DataFrame.merge` method, which provides
-similar functionality. Note that the data does not have
-to be sorted ahead of time, and different join
-types are accomplished via the ``how`` keyword.
-
-.. ipython:: python
-
- inner_join = df1.merge(df2, on=["key"], how="inner")
- inner_join
-
- left_join = df1.merge(df2, on=["key"], how="left")
- left_join
-
- right_join = df1.merge(df2, on=["key"], how="right")
- right_join
-
- outer_join = df1.merge(df2, on=["key"], how="outer")
- outer_join
+.. include:: includes/merge.rst
Missing data
------------
-Like SAS, pandas has a representation for missing data - which is the
-special float value ``NaN`` (not a number). Many of the semantics
-are the same, for example missing data propagates through numeric
-operations, and is ignored by default for aggregations.
-
-.. ipython:: python
-
- outer_join
- outer_join["value_x"] + outer_join["value_y"]
- outer_join["value_x"].sum()
+.. include:: includes/missing_intro.rst
One difference is that missing data cannot be compared to its sentinel value.
For example, in SAS you could do this to filter missing values.
@@ -522,25 +459,7 @@ For example, in SAS you could do this to filter missing values.
if value_x ^= .;
run;
-Which doesn't work in pandas. Instead, the ``pd.isna`` or ``pd.notna`` functions
-should be used for comparisons.
-
-.. ipython:: python
-
- outer_join[pd.isna(outer_join["value_x"])]
- outer_join[pd.notna(outer_join["value_x"])]
-
-pandas also provides a variety of methods to work with missing data - some of
-which would be challenging to express in SAS. For example, there are methods to
-drop all rows with any missing values, replacing missing values with a specified
-value, like the mean, or forward filling from previous rows. See the
-:ref:`missing data documentation<missing_data>` for more.
-
-.. ipython:: python
-
- outer_join.dropna()
- outer_join.fillna(method="ffill")
- outer_join["value_x"].fillna(outer_join["value_x"].mean())
+.. include:: includes/missing.rst
GroupBy
@@ -549,7 +468,7 @@ GroupBy
Aggregation
~~~~~~~~~~~
-SAS's PROC SUMMARY can be used to group by one or
+SAS's ``PROC SUMMARY`` can be used to group by one or
more key variables and compute aggregations on
numeric columns.
@@ -561,14 +480,7 @@ numeric columns.
output out=tips_summed sum=;
run;
-pandas provides a flexible ``groupby`` mechanism that
-allows similar aggregations. See the :ref:`groupby documentation<groupby>`
-for more details and examples.
-
-.. ipython:: python
-
- tips_summed = tips.groupby(["sex", "smoker"])[["total_bill", "tip"]].sum()
- tips_summed.head()
+.. include:: includes/groupby.rst
Transformation
@@ -597,16 +509,7 @@ example, to subtract the mean for each observation by smoker group.
if a and b;
run;
-
-pandas ``groupby`` provides a ``transform`` mechanism that allows
-these type of operations to be succinctly expressed in one
-operation.
-
-.. ipython:: python
-
- gb = tips.groupby("smoker")["total_bill"]
- tips["adj_total_bill"] = tips["total_bill"] - gb.transform("mean")
- tips.head()
+.. include:: includes/transform.rst
By group processing
diff --git a/doc/source/getting_started/comparison/comparison_with_stata.rst b/doc/source/getting_started/comparison/comparison_with_stata.rst
index d1ad18bddb0a7..ca536e7273870 100644
--- a/doc/source/getting_started/comparison/comparison_with_stata.rst
+++ b/doc/source/getting_started/comparison/comparison_with_stata.rst
@@ -311,15 +311,7 @@ first position of the substring you supply as the second argument.
generate str_position = strpos(sex, "ale")
-Python determines the position of a character in a string with the
-:func:`find` function. ``find`` searches for the first position of the
-substring. If the substring is found, the function returns its
-position. Keep in mind that Python indexes are zero-based and
-the function will return -1 if it fails to find the substring.
-
-.. ipython:: python
-
- tips["sex"].str.find("ale").head()
+.. include:: includes/find_substring.rst
Extracting substring by position
@@ -331,13 +323,7 @@ Stata extracts a substring from a string based on its position with the :func:`s
generate short_sex = substr(sex, 1, 1)
-With pandas you can use ``[]`` notation to extract a substring
-from a string by position locations. Keep in mind that Python
-indexes are zero-based.
-
-.. ipython:: python
-
- tips["sex"].str[0:1].head()
+.. include:: includes/extract_substring.rst
Extracting nth word
@@ -358,16 +344,7 @@ second argument specifies which word you want to extract.
generate first_name = word(name, 1)
generate last_name = word(name, -1)
-Python extracts a substring from a string based on its text
-by using regular expressions. There are much more powerful
-approaches, but this just shows a simple approach.
-
-.. ipython:: python
-
- firstlast = pd.DataFrame({"string": ["John Smith", "Jane Cook"]})
- firstlast["First_Name"] = firstlast["string"].str.split(" ", expand=True)[0]
- firstlast["Last_Name"] = firstlast["string"].str.rsplit(" ", expand=True)[0]
- firstlast
+.. include:: includes/nth_word.rst
Changing case
@@ -390,27 +367,13 @@ change the case of ASCII and Unicode strings, respectively.
generate title = strproper(string)
list
-The equivalent Python functions are ``upper``, ``lower``, and ``title``.
-
-.. ipython:: python
+.. include:: includes/case.rst
- firstlast = pd.DataFrame({"string": ["John Smith", "Jane Cook"]})
- firstlast["upper"] = firstlast["string"].str.upper()
- firstlast["lower"] = firstlast["string"].str.lower()
- firstlast["title"] = firstlast["string"].str.title()
- firstlast
Merging
-------
-The following tables will be used in the merge examples
-
-.. ipython:: python
-
- df1 = pd.DataFrame({"key": ["A", "B", "C", "D"], "value": np.random.randn(4)})
- df1
- df2 = pd.DataFrame({"key": ["B", "D", "D", "E"], "value": np.random.randn(4)})
- df2
+.. include:: includes/merge_setup.rst
In Stata, to perform a merge, one data set must be in memory
and the other must be referenced as a file name on disk. In
@@ -465,38 +428,13 @@ or the intersection of the two by using the values created in the
restore
merge 1:n key using df2.dta
-pandas DataFrames have a :meth:`DataFrame.merge` method, which provides
-similar functionality. Note that different join
-types are accomplished via the ``how`` keyword.
-
-.. ipython:: python
-
- inner_join = df1.merge(df2, on=["key"], how="inner")
- inner_join
-
- left_join = df1.merge(df2, on=["key"], how="left")
- left_join
-
- right_join = df1.merge(df2, on=["key"], how="right")
- right_join
-
- outer_join = df1.merge(df2, on=["key"], how="outer")
- outer_join
+.. include:: includes/merge_setup.rst
Missing data
------------
-Like Stata, pandas has a representation for missing data -- the
-special float value ``NaN`` (not a number). Many of the semantics
-are the same; for example missing data propagates through numeric
-operations, and is ignored by default for aggregations.
-
-.. ipython:: python
-
- outer_join
- outer_join["value_x"] + outer_join["value_y"]
- outer_join["value_x"].sum()
+.. include:: includes/missing_intro.rst
One difference is that missing data cannot be compared to its sentinel value.
For example, in Stata you could do this to filter missing values.
@@ -508,30 +446,7 @@ For example, in Stata you could do this to filter missing values.
* Keep non-missing values
list if value_x != .
-This doesn't work in pandas. Instead, the :func:`pd.isna` or :func:`pd.notna` functions
-should be used for comparisons.
-
-.. ipython:: python
-
- outer_join[pd.isna(outer_join["value_x"])]
- outer_join[pd.notna(outer_join["value_x"])]
-
-pandas also provides a variety of methods to work with missing data -- some of
-which would be challenging to express in Stata. For example, there are methods to
-drop all rows with any missing values, replacing missing values with a specified
-value, like the mean, or forward filling from previous rows. See the
-:ref:`missing data documentation<missing_data>` for more.
-
-.. ipython:: python
-
- # Drop rows with any missing value
- outer_join.dropna()
-
- # Fill forwards
- outer_join.fillna(method="ffill")
-
- # Impute missing values with the mean
- outer_join["value_x"].fillna(outer_join["value_x"].mean())
+.. include:: includes/missing.rst
GroupBy
@@ -548,14 +463,7 @@ numeric columns.
collapse (sum) total_bill tip, by(sex smoker)
-pandas provides a flexible ``groupby`` mechanism that
-allows similar aggregations. See the :ref:`groupby documentation<groupby>`
-for more details and examples.
-
-.. ipython:: python
-
- tips_summed = tips.groupby(["sex", "smoker"])[["total_bill", "tip"]].sum()
- tips_summed.head()
+.. include:: includes/groupby.rst
Transformation
@@ -570,16 +478,7 @@ For example, to subtract the mean for each observation by smoker group.
bysort sex smoker: egen group_bill = mean(total_bill)
generate adj_total_bill = total_bill - group_bill
-
-pandas ``groupby`` provides a ``transform`` mechanism that allows
-these type of operations to be succinctly expressed in one
-operation.
-
-.. ipython:: python
-
- gb = tips.groupby("smoker")["total_bill"]
- tips["adj_total_bill"] = tips["total_bill"] - gb.transform("mean")
- tips.head()
+.. include:: includes/transform.rst
By group processing
diff --git a/doc/source/getting_started/comparison/includes/case.rst b/doc/source/getting_started/comparison/includes/case.rst
new file mode 100644
index 0000000000000..c00a830bc8511
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/case.rst
@@ -0,0 +1,10 @@
+The equivalent pandas methods are :meth:`Series.str.upper`, :meth:`Series.str.lower`, and
+:meth:`Series.str.title`.
+
+.. ipython:: python
+
+ firstlast = pd.DataFrame({"string": ["John Smith", "Jane Cook"]})
+ firstlast["upper"] = firstlast["string"].str.upper()
+ firstlast["lower"] = firstlast["string"].str.lower()
+ firstlast["title"] = firstlast["string"].str.title()
+ firstlast
diff --git a/doc/source/getting_started/comparison/includes/extract_substring.rst b/doc/source/getting_started/comparison/includes/extract_substring.rst
new file mode 100644
index 0000000000000..78eee286ad467
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/extract_substring.rst
@@ -0,0 +1,7 @@
+With pandas you can use ``[]`` notation to extract a substring
+from a string by position locations. Keep in mind that Python
+indexes are zero-based.
+
+.. ipython:: python
+
+ tips["sex"].str[0:1].head()
diff --git a/doc/source/getting_started/comparison/includes/find_substring.rst b/doc/source/getting_started/comparison/includes/find_substring.rst
new file mode 100644
index 0000000000000..ee940b64f5cae
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/find_substring.rst
@@ -0,0 +1,8 @@
+You can find the position of a character in a column of strings with the :meth:`Series.str.find`
+method. ``find`` searches for the first position of the substring. If the substring is found, the
+method returns its position. If not found, it returns ``-1``. Keep in mind that Python indexes are
+zero-based.
+
+.. ipython:: python
+
+ tips["sex"].str.find("ale").head()
diff --git a/doc/source/getting_started/comparison/includes/groupby.rst b/doc/source/getting_started/comparison/includes/groupby.rst
new file mode 100644
index 0000000000000..caa9f6ec9c9b8
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/groupby.rst
@@ -0,0 +1,7 @@
+pandas provides a flexible ``groupby`` mechanism that allows similar aggregations. See the
+:ref:`groupby documentation<groupby>` for more details and examples.
+
+.. ipython:: python
+
+ tips_summed = tips.groupby(["sex", "smoker"])[["total_bill", "tip"]].sum()
+ tips_summed.head()
diff --git a/doc/source/getting_started/comparison/includes/length.rst b/doc/source/getting_started/comparison/includes/length.rst
index 9581c661c0170..5a0c803e9eff2 100644
--- a/doc/source/getting_started/comparison/includes/length.rst
+++ b/doc/source/getting_started/comparison/includes/length.rst
@@ -1,4 +1,4 @@
-Python determines the length of a character string with the ``len`` function.
+You can find the length of a character string with :meth:`Series.str.len`.
In Python 3, all strings are Unicode strings. ``len`` includes trailing blanks.
Use ``len`` and ``rstrip`` to exclude trailing blanks.
diff --git a/doc/source/getting_started/comparison/includes/merge.rst b/doc/source/getting_started/comparison/includes/merge.rst
new file mode 100644
index 0000000000000..b8e3f54fd132b
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/merge.rst
@@ -0,0 +1,17 @@
+pandas DataFrames have a :meth:`~DataFrame.merge` method, which provides similar functionality. The
+data does not have to be sorted ahead of time, and different join types are accomplished via the
+``how`` keyword.
+
+.. ipython:: python
+
+ inner_join = df1.merge(df2, on=["key"], how="inner")
+ inner_join
+
+ left_join = df1.merge(df2, on=["key"], how="left")
+ left_join
+
+ right_join = df1.merge(df2, on=["key"], how="right")
+ right_join
+
+ outer_join = df1.merge(df2, on=["key"], how="outer")
+ outer_join
diff --git a/doc/source/getting_started/comparison/includes/merge_setup.rst b/doc/source/getting_started/comparison/includes/merge_setup.rst
new file mode 100644
index 0000000000000..f115cd58f7a94
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/merge_setup.rst
@@ -0,0 +1,8 @@
+The following tables will be used in the merge examples:
+
+.. ipython:: python
+
+ df1 = pd.DataFrame({"key": ["A", "B", "C", "D"], "value": np.random.randn(4)})
+ df1
+ df2 = pd.DataFrame({"key": ["B", "D", "D", "E"], "value": np.random.randn(4)})
+ df2
diff --git a/doc/source/getting_started/comparison/includes/missing.rst b/doc/source/getting_started/comparison/includes/missing.rst
new file mode 100644
index 0000000000000..8e6ba95e98036
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/missing.rst
@@ -0,0 +1,24 @@
+This doesn't work in pandas. Instead, the :func:`pd.isna` or :func:`pd.notna` functions
+should be used for comparisons.
+
+.. ipython:: python
+
+ outer_join[pd.isna(outer_join["value_x"])]
+ outer_join[pd.notna(outer_join["value_x"])]
+
+pandas also provides a variety of methods to work with missing data -- some of
+which would be challenging to express in Stata. For example, there are methods to
+drop all rows with any missing values, replacing missing values with a specified
+value, like the mean, or forward filling from previous rows. See the
+:ref:`missing data documentation<missing_data>` for more.
+
+.. ipython:: python
+
+ # Drop rows with any missing value
+ outer_join.dropna()
+
+ # Fill forwards
+ outer_join.fillna(method="ffill")
+
+ # Impute missing values with the mean
+ outer_join["value_x"].fillna(outer_join["value_x"].mean())
diff --git a/doc/source/getting_started/comparison/includes/missing_intro.rst b/doc/source/getting_started/comparison/includes/missing_intro.rst
new file mode 100644
index 0000000000000..ed97f639f3f3d
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/missing_intro.rst
@@ -0,0 +1,9 @@
+Both have a representation for missing data — pandas' is the special float value ``NaN`` (not a
+number). Many of the semantics are the same; for example missing data propagates through numeric
+operations, and is ignored by default for aggregations.
+
+.. ipython:: python
+
+ outer_join
+ outer_join["value_x"] + outer_join["value_y"]
+ outer_join["value_x"].sum()
diff --git a/doc/source/getting_started/comparison/includes/nth_word.rst b/doc/source/getting_started/comparison/includes/nth_word.rst
new file mode 100644
index 0000000000000..7af0285005d5b
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/nth_word.rst
@@ -0,0 +1,9 @@
+The simplest way to extract words in pandas is to split the strings by spaces, then reference the
+word by index. Note there are more powerful approaches should you need them.
+
+.. ipython:: python
+
+ firstlast = pd.DataFrame({"String": ["John Smith", "Jane Cook"]})
+ firstlast["First_Name"] = firstlast["String"].str.split(" ", expand=True)[0]
+ firstlast["Last_Name"] = firstlast["String"].str.rsplit(" ", expand=True)[0]
+ firstlast
diff --git a/doc/source/getting_started/comparison/includes/sorting.rst b/doc/source/getting_started/comparison/includes/sorting.rst
index 23f11ff485474..0840c9dd554b7 100644
--- a/doc/source/getting_started/comparison/includes/sorting.rst
+++ b/doc/source/getting_started/comparison/includes/sorting.rst
@@ -1,5 +1,4 @@
-pandas objects have a :meth:`DataFrame.sort_values` method, which
-takes a list of columns to sort by.
+pandas has a :meth:`DataFrame.sort_values` method, which takes a list of columns to sort by.
.. ipython:: python
diff --git a/doc/source/getting_started/comparison/includes/transform.rst b/doc/source/getting_started/comparison/includes/transform.rst
new file mode 100644
index 0000000000000..0aa5b5b298cf7
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/transform.rst
@@ -0,0 +1,8 @@
+pandas provides a :ref:`groupby.transform` mechanism that allows these type of operations to be
+succinctly expressed in one operation.
+
+.. ipython:: python
+
+ gb = tips.groupby("smoker")["total_bill"]
+ tips["adj_total_bill"] = tips["total_bill"] - gb.transform("mean")
+ tips.head()
| This pull request does a few things between the SAS and Stata pages, in separate commits:
- Makes the headings match, where it makes sense for them to
- Create more shared includes, as a follow-up to https://github.com/pandas-dev/pandas/pull/38887
- Improves some wording and ensures more methods are linked in the comparison includes
The motivation here is that I'm working on adding the other sections to the Comparison to Spreadsheets page, and want to ensure they're consistent.
---
- [ ] ~~closes #xxxx~~
- [x] tests added / passed
- [ ] ~~passes `black pandas`~~
- [ ] ~~passes `git diff upstream/master -u -- "*.py" | flake8 --diff`~~
- [ ] ~~whatsnew entry~~
| https://api.github.com/repos/pandas-dev/pandas/pulls/38933 | 2021-01-03T23:31:09Z | 2021-01-04T03:55:07Z | 2021-01-04T03:55:07Z | 2021-01-04T04:53:19Z |
BUG: rank_2d raising with mixed dtypes | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index af11b6543a74b..0884065247fbc 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -217,6 +217,8 @@ Numeric
- Bug in :meth:`DataFrame.select_dtypes` with ``include=np.number`` now retains numeric ``ExtensionDtype`` columns (:issue:`35340`)
- Bug in :meth:`DataFrame.mode` and :meth:`Series.mode` not keeping consistent integer :class:`Index` for empty input (:issue:`33321`)
- Bug in :meth:`DataFrame.rank` with ``np.inf`` and mixture of ``np.nan`` and ``np.inf`` (:issue:`32593`)
+- Bug in :meth:`DataFrame.rank` with ``axis=0`` and columns holding incomparable types raising ``IndexError`` (:issue:`38932`)
+-
Conversion
^^^^^^^^^^
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 3aa4738b36dc8..76bfb001cea81 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -26,6 +26,7 @@ from numpy cimport (
int16_t,
int32_t,
int64_t,
+ intp_t,
ndarray,
uint8_t,
uint16_t,
@@ -1105,14 +1106,13 @@ def rank_2d(
Py_ssize_t infs
ndarray[float64_t, ndim=2] ranks
ndarray[rank_t, ndim=2] values
- ndarray[int64_t, ndim=2] argsorted
+ ndarray[intp_t, ndim=2] argsort_indexer
ndarray[uint8_t, ndim=2] mask
rank_t val, nan_value
float64_t count, sum_ranks = 0.0
int tiebreak = 0
int64_t idx
bint check_mask, condition, keep_na
- const int64_t[:] labels
tiebreak = tiebreakers[ties_method]
@@ -1158,40 +1158,19 @@ def rank_2d(
n, k = (<object>values).shape
ranks = np.empty((n, k), dtype='f8')
- # For compatibility when calling rank_1d
- labels = np.zeros(k, dtype=np.int64)
- if rank_t is object:
- try:
- _as = values.argsort(1)
- except TypeError:
- values = in_arr
- for i in range(len(values)):
- ranks[i] = rank_1d(
- in_arr[i],
- labels=labels,
- ties_method=ties_method,
- ascending=ascending,
- pct=pct
- )
- if axis == 0:
- return ranks.T
- else:
- return ranks
+ if tiebreak == TIEBREAK_FIRST:
+ # need to use a stable sort here
+ argsort_indexer = values.argsort(axis=1, kind='mergesort')
+ if not ascending:
+ tiebreak = TIEBREAK_FIRST_DESCENDING
else:
- if tiebreak == TIEBREAK_FIRST:
- # need to use a stable sort here
- _as = values.argsort(axis=1, kind='mergesort')
- if not ascending:
- tiebreak = TIEBREAK_FIRST_DESCENDING
- else:
- _as = values.argsort(1)
+ argsort_indexer = values.argsort(1)
if not ascending:
- _as = _as[:, ::-1]
+ argsort_indexer = argsort_indexer[:, ::-1]
- values = _take_2d(values, _as)
- argsorted = _as.astype('i8')
+ values = _take_2d(values, argsort_indexer)
for i in range(n):
dups = sum_ranks = infs = 0
@@ -1200,7 +1179,7 @@ def rank_2d(
count = 0.0
for j in range(k):
val = values[i, j]
- idx = argsorted[i, j]
+ idx = argsort_indexer[i, j]
if keep_na and check_mask and mask[i, idx]:
ranks[i, idx] = NaN
infs += 1
@@ -1215,38 +1194,38 @@ def rank_2d(
condition = (
j == k - 1 or
are_diff(values[i, j + 1], val) or
- (keep_na and check_mask and mask[i, argsorted[i, j + 1]])
+ (keep_na and check_mask and mask[i, argsort_indexer[i, j + 1]])
)
else:
condition = (
j == k - 1 or
values[i, j + 1] != val or
- (keep_na and check_mask and mask[i, argsorted[i, j + 1]])
+ (keep_na and check_mask and mask[i, argsort_indexer[i, j + 1]])
)
if condition:
if tiebreak == TIEBREAK_AVERAGE:
for z in range(j - dups + 1, j + 1):
- ranks[i, argsorted[i, z]] = sum_ranks / dups
+ ranks[i, argsort_indexer[i, z]] = sum_ranks / dups
elif tiebreak == TIEBREAK_MIN:
for z in range(j - dups + 1, j + 1):
- ranks[i, argsorted[i, z]] = j - dups + 2
+ ranks[i, argsort_indexer[i, z]] = j - dups + 2
elif tiebreak == TIEBREAK_MAX:
for z in range(j - dups + 1, j + 1):
- ranks[i, argsorted[i, z]] = j + 1
+ ranks[i, argsort_indexer[i, z]] = j + 1
elif tiebreak == TIEBREAK_FIRST:
if rank_t is object:
raise ValueError('first not supported for non-numeric data')
else:
for z in range(j - dups + 1, j + 1):
- ranks[i, argsorted[i, z]] = z + 1
+ ranks[i, argsort_indexer[i, z]] = z + 1
elif tiebreak == TIEBREAK_FIRST_DESCENDING:
for z in range(j - dups + 1, j + 1):
- ranks[i, argsorted[i, z]] = 2 * j - z - dups + 2
+ ranks[i, argsort_indexer[i, z]] = 2 * j - z - dups + 2
elif tiebreak == TIEBREAK_DENSE:
total_tie_count += 1
for z in range(j - dups + 1, j + 1):
- ranks[i, argsorted[i, z]] = total_tie_count
+ ranks[i, argsort_indexer[i, z]] = total_tie_count
sum_ranks = dups = 0
if pct:
if tiebreak == TIEBREAK_DENSE:
diff --git a/pandas/tests/frame/methods/test_rank.py b/pandas/tests/frame/methods/test_rank.py
index 6ad1b475e28a2..4255c1cb5e65f 100644
--- a/pandas/tests/frame/methods/test_rank.py
+++ b/pandas/tests/frame/methods/test_rank.py
@@ -445,3 +445,15 @@ def test_rank_both_inf(self):
expected = DataFrame({"a": [1.0, 2.0, 3.0]})
result = df.rank()
tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "data,expected",
+ [
+ ({"a": [1, 2, "a"], "b": [4, 5, 6]}, DataFrame({"b": [1.0, 2.0, 3.0]})),
+ ({"a": [1, 2, "a"]}, DataFrame(index=range(3))),
+ ],
+ )
+ def test_rank_mixed_axis_zero(self, data, expected):
+ df = DataFrame(data)
+ result = df.rank()
+ tm.assert_frame_equal(result, expected)
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38932 | 2021-01-03T22:21:58Z | 2021-01-05T00:45:16Z | 2021-01-05T00:45:16Z | 2021-01-05T01:30:14Z |
BUG: DataFrame.__setitem__ raising ValueError with string indexer and empty df and df to set | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index ac3b5dcaf53ae..e46c729348d33 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -244,6 +244,7 @@ Indexing
- Bug in :meth:`CategoricalIndex.get_indexer` failing to raise ``InvalidIndexError`` when non-unique (:issue:`38372`)
- Bug in inserting many new columns into a :class:`DataFrame` causing incorrect subsequent indexing behavior (:issue:`38380`)
- Bug in :meth:`DataFrame.loc`, :meth:`Series.loc`, :meth:`DataFrame.__getitem__` and :meth:`Series.__getitem__` returning incorrect elements for non-monotonic :class:`DatetimeIndex` for string slices (:issue:`33146`)
+- Bug in :meth:`DataFrame.__setitem__` raising ``ValueError`` with empty :class:`DataFrame` and specified columns for string indexer and non empty :class:`DataFrame` to set (:issue:`38831`)
- Bug in :meth:`DataFrame.iloc.__setitem__` and :meth:`DataFrame.loc.__setitem__` with mixed dtypes when setting with a dictionary value (:issue:`38335`)
- Bug in :meth:`DataFrame.loc` dropping levels of :class:`MultiIndex` when :class:`DataFrame` used as input has only one row (:issue:`10521`)
-
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1abbe37e67b09..aeae39094ba7c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3334,13 +3334,14 @@ def _ensure_valid_index(self, value):
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value) and len(value):
- try:
- value = Series(value)
- except (ValueError, NotImplementedError, TypeError) as err:
- raise ValueError(
- "Cannot set a frame with no defined index "
- "and a value that cannot be converted to a Series"
- ) from err
+ if not isinstance(value, DataFrame):
+ try:
+ value = Series(value)
+ except (ValueError, NotImplementedError, TypeError) as err:
+ raise ValueError(
+ "Cannot set a frame with no defined index "
+ "and a value that cannot be converted to a Series"
+ ) from err
# GH31368 preserve name of index
index_copy = value.index.copy()
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index 19d2f8301037a..28b1f02ff020c 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -338,6 +338,16 @@ def test_setitem_bool_with_numeric_index(self, dtype):
tm.assert_index_equal(df.columns, expected_cols)
+ @pytest.mark.parametrize("indexer", ["B", ["B"]])
+ def test_setitem_frame_length_0_str_key(self, indexer):
+ # GH#38831
+ df = DataFrame(columns=["A", "B"])
+ other = DataFrame({"B": [1, 2]})
+ df[indexer] = other
+ expected = DataFrame({"A": [np.nan] * 2, "B": [1, 2]})
+ expected["A"] = expected["A"].astype("object")
+ tm.assert_frame_equal(df, expected)
+
class TestDataFrameSetItemWithExpansion:
def test_setitem_listlike_views(self):
| - [x] closes #38831
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Dont have to convert df to series, df has an index.
cc @jbrockmendel | https://api.github.com/repos/pandas-dev/pandas/pulls/38931 | 2021-01-03T22:14:19Z | 2021-01-06T00:35:39Z | 2021-01-06T00:35:38Z | 2021-01-06T01:13:51Z |
TST: Replace pytest.xfail | diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index bc1295cc0a0ce..2548fc18e4032 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -386,9 +386,13 @@ def test_asi8_deprecation(self, index):
@pytest.mark.parametrize("na_position", [None, "middle"])
-def test_sort_values_invalid_na_position(index_with_missing, na_position):
- if isinstance(index_with_missing, (CategoricalIndex, MultiIndex)):
- pytest.xfail("missing value sorting order not defined for index type")
+def test_sort_values_invalid_na_position(request, index_with_missing, na_position):
+ if isinstance(index_with_missing, MultiIndex):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="missing value sorting order not defined for index type"
+ )
+ )
if na_position not in ["first", "last"]:
with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"):
@@ -396,12 +400,16 @@ def test_sort_values_invalid_na_position(index_with_missing, na_position):
@pytest.mark.parametrize("na_position", ["first", "last"])
-def test_sort_values_with_missing(index_with_missing, na_position):
+def test_sort_values_with_missing(request, index_with_missing, na_position):
# GH 35584. Test that sort_values works with missing values,
# sort non-missing and place missing according to na_position
- if isinstance(index_with_missing, (CategoricalIndex, MultiIndex)):
- pytest.xfail("missing value sorting order not defined for index type")
+ if isinstance(index_with_missing, MultiIndex):
+ request.node.add_marker(
+ pytest.mark.xfail(reason="missing value sorting order not implemented")
+ )
+ elif isinstance(index_with_missing, CategoricalIndex):
+ pytest.skip("missing value sorting order not well-defined")
missing_count = np.sum(index_with_missing.isna())
not_na_vals = index_with_missing[index_with_missing.notna()].values
diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py
index 64b08c6058b81..f2a33df71e8e3 100644
--- a/pandas/tests/indexes/test_setops.py
+++ b/pandas/tests/indexes/test_setops.py
@@ -49,7 +49,7 @@ def test_union_different_types(request, index, index_fixture2):
)
if any(isinstance(idx, pd.MultiIndex) for idx in (idx1, idx2)):
- pytest.xfail("This test doesn't consider multiindixes.")
+ pytest.skip("This test doesn't consider multiindixes.")
if is_dtype_equal(idx1.dtype, idx2.dtype):
pytest.skip("This test only considers non matching dtypes.")
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index d6d0723bee0e8..f79a822481ea0 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -328,14 +328,15 @@ def test_series_where(self, obj, key, expected):
tm.assert_series_equal(res, expected)
def test_index_where(self, obj, key, expected, request):
- if obj.dtype == bool:
- msg = "Index/Series casting behavior inconsistent GH#38692"
- mark = pytest.xfail(reason=msg)
- request.node.add_marker(mark)
-
mask = np.zeros(obj.shape, dtype=bool)
mask[key] = True
+ if obj.dtype == bool and not mask.all():
+ # When mask is all True, casting behavior does not apply
+ msg = "Index/Series casting behavior inconsistent GH#38692"
+ mark = pytest.mark.xfail(reason=msg)
+ request.node.add_marker(mark)
+
res = Index(obj).where(~mask, np.nan)
tm.assert_index_equal(res, Index(expected))
diff --git a/pandas/tests/series/indexing/test_where.py b/pandas/tests/series/indexing/test_where.py
index 59c68fba53e25..edcec386cd8ba 100644
--- a/pandas/tests/series/indexing/test_where.py
+++ b/pandas/tests/series/indexing/test_where.py
@@ -489,10 +489,6 @@ def test_where_datetimelike_categorical(tz_naive_fixture):
tm.assert_series_equal(res, Series(dr))
# DataFrame.where
- if tz is None:
- res = pd.DataFrame(lvals).where(mask[:, None], pd.DataFrame(rvals))
- else:
- with pytest.xfail(reason="frame._values loses tz"):
- res = pd.DataFrame(lvals).where(mask[:, None], pd.DataFrame(rvals))
+ res = pd.DataFrame(lvals).where(mask[:, None], pd.DataFrame(rvals))
tm.assert_frame_equal(res, pd.DataFrame(dr))
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index a0e0213a6dce5..219aaddb116cd 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -737,12 +737,18 @@ def test_align_date_objects_with_datetimeindex(self):
class TestNamePreservation:
@pytest.mark.parametrize("box", [list, tuple, np.array, Index, Series, pd.array])
@pytest.mark.parametrize("flex", [True, False])
- def test_series_ops_name_retention(self, flex, box, names, all_binary_operators):
+ def test_series_ops_name_retention(
+ self, request, flex, box, names, all_binary_operators
+ ):
# GH#33930 consistent name renteiton
op = all_binary_operators
- if op is ops.rfloordiv and box in [list, tuple]:
- pytest.xfail("op fails because of inconsistent ndarray-wrapping GH#28759")
+ if op is ops.rfloordiv and box in [list, tuple] and not flex:
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="op fails because of inconsistent ndarray-wrapping GH#28759"
+ )
+ )
left = Series(range(10), name=names[0])
right = Series(range(10), name=names[1])
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Part of #38902. | https://api.github.com/repos/pandas-dev/pandas/pulls/38929 | 2021-01-03T21:10:21Z | 2021-01-04T01:26:12Z | 2021-01-04T01:26:12Z | 2021-01-04T21:05:18Z |
TST: stricten xfails | diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index e6d1cd5f47d8d..22eb642ed8512 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -821,7 +821,6 @@ def test_frame_with_frame_reindex(self):
(np.datetime64(20, "ns"), "<M8[ns]"),
],
)
- @pytest.mark.xfail(reason="GH38630", strict=False)
@pytest.mark.parametrize(
"op",
[
@@ -835,9 +834,12 @@ def test_frame_with_frame_reindex(self):
ids=lambda x: x.__name__,
)
def test_binop_other(self, op, value, dtype):
+
skip = {
(operator.truediv, "bool"),
(operator.pow, "bool"),
+ (operator.add, "bool"),
+ (operator.mul, "bool"),
}
e = DummyElement(value, dtype)
@@ -879,12 +881,18 @@ def test_binop_other(self, op, value, dtype):
elif (op, dtype) in skip:
- msg = "operator '.*' not implemented for .* dtypes"
- with pytest.raises(NotImplementedError, match=msg):
+ if op in [operator.add, operator.mul]:
with tm.assert_produces_warning(UserWarning):
# "evaluating in Python space because ..."
op(s, e.value)
+ else:
+ msg = "operator '.*' not implemented for .* dtypes"
+ with pytest.raises(NotImplementedError, match=msg):
+ with tm.assert_produces_warning(UserWarning):
+ # "evaluating in Python space because ..."
+ op(s, e.value)
+
else:
# FIXME: Since dispatching to Series, this test no longer
# asserts anything meaningful
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index bc1295cc0a0ce..a1a3ab554225b 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -16,12 +16,10 @@
from pandas import (
CategoricalIndex,
DatetimeIndex,
- Int64Index,
MultiIndex,
PeriodIndex,
RangeIndex,
TimedeltaIndex,
- UInt64Index,
)
import pandas._testing as tm
@@ -371,12 +369,9 @@ def test_ravel_deprecation(self, index):
with tm.assert_produces_warning(FutureWarning):
index.ravel()
- @pytest.mark.xfail(reason="GH38630", strict=False)
def test_asi8_deprecation(self, index):
# GH#37877
- if isinstance(
- index, (Int64Index, UInt64Index, DatetimeIndex, TimedeltaIndex, PeriodIndex)
- ):
+ if isinstance(index, (DatetimeIndex, TimedeltaIndex, PeriodIndex)):
warn = None
else:
warn = FutureWarning
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38927 | 2021-01-03T18:51:51Z | 2021-01-04T13:36:07Z | 2021-01-04T13:36:07Z | 2021-01-04T16:22:59Z |
ENH: Add support to import optional submodule and specify different min_version than default | diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py
index 3775a47d44521..def881b8fd863 100644
--- a/pandas/compat/_optional.py
+++ b/pandas/compat/_optional.py
@@ -1,6 +1,8 @@
import distutils.version
import importlib
+import sys
import types
+from typing import Optional
import warnings
# Update install.rst when updating versions!
@@ -58,7 +60,11 @@ def _get_version(module: types.ModuleType) -> str:
def import_optional_dependency(
- name: str, extra: str = "", raise_on_missing: bool = True, on_version: str = "raise"
+ name: str,
+ extra: str = "",
+ raise_on_missing: bool = True,
+ on_version: str = "raise",
+ min_version: Optional[str] = None,
):
"""
Import an optional dependency.
@@ -70,8 +76,7 @@ def import_optional_dependency(
Parameters
----------
name : str
- The module name. This should be top-level only, so that the
- version may be checked.
+ The module name.
extra : str
Additional text to include in the ImportError message.
raise_on_missing : bool, default True
@@ -85,7 +90,9 @@ def import_optional_dependency(
* ignore: Return the module, even if the version is too old.
It's expected that users validate the version locally when
using ``on_version="ignore"`` (see. ``io/html.py``)
-
+ min_version : str, default None
+ Specify a minimum version that is different from the global pandas
+ minimum version required.
Returns
-------
maybe_module : Optional[ModuleType]
@@ -110,13 +117,20 @@ def import_optional_dependency(
else:
return None
- minimum_version = VERSIONS.get(name)
+ # Handle submodules: if we have submodule, grab parent module from sys.modules
+ parent = name.split(".")[0]
+ if parent != name:
+ install_name = parent
+ module_to_get = sys.modules[install_name]
+ else:
+ module_to_get = module
+ minimum_version = min_version if min_version is not None else VERSIONS.get(parent)
if minimum_version:
- version = _get_version(module)
+ version = _get_version(module_to_get)
if distutils.version.LooseVersion(version) < minimum_version:
assert on_version in {"warn", "raise", "ignore"}
msg = (
- f"Pandas requires version '{minimum_version}' or newer of '{name}' "
+ f"Pandas requires version '{minimum_version}' or newer of '{parent}' "
f"(version '{version}' currently installed)."
)
if on_version == "warn":
diff --git a/pandas/tests/test_optional_dependency.py b/pandas/tests/test_optional_dependency.py
index e5ed69b7703b1..304ec124ac8c5 100644
--- a/pandas/tests/test_optional_dependency.py
+++ b/pandas/tests/test_optional_dependency.py
@@ -33,6 +33,10 @@ def test_bad_version(monkeypatch):
with pytest.raises(ImportError, match=match):
import_optional_dependency("fakemodule")
+ # Test min_version parameter
+ result = import_optional_dependency("fakemodule", min_version="0.8")
+ assert result is module
+
with tm.assert_produces_warning(UserWarning):
result = import_optional_dependency("fakemodule", on_version="warn")
assert result is None
@@ -42,6 +46,31 @@ def test_bad_version(monkeypatch):
assert result is module
+def test_submodule(monkeypatch):
+ # Create a fake module with a submodule
+ name = "fakemodule"
+ module = types.ModuleType(name)
+ module.__version__ = "0.9.0"
+ sys.modules[name] = module
+ sub_name = "submodule"
+ submodule = types.ModuleType(sub_name)
+ setattr(module, sub_name, submodule)
+ sys.modules[f"{name}.{sub_name}"] = submodule
+ monkeypatch.setitem(VERSIONS, name, "1.0.0")
+
+ match = "Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'"
+ with pytest.raises(ImportError, match=match):
+ import_optional_dependency("fakemodule.submodule")
+
+ with tm.assert_produces_warning(UserWarning):
+ result = import_optional_dependency("fakemodule.submodule", on_version="warn")
+ assert result is None
+
+ module.__version__ = "1.0.0" # exact match is OK
+ result = import_optional_dependency("fakemodule.submodule")
+ assert result is submodule
+
+
def test_no_version_raises(monkeypatch):
name = "fakemodule"
module = types.ModuleType(name)
| - [ ] closes #38888
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
cc @jreback, @arw2019 | https://api.github.com/repos/pandas-dev/pandas/pulls/38925 | 2021-01-03T18:23:27Z | 2021-01-04T00:14:16Z | 2021-01-04T00:14:16Z | 2021-01-04T03:38:53Z |
TST: 26807 split pandas/tests/tseries/offsets/test_offsets.py into multiple smaller test modules | diff --git a/pandas/tests/tseries/offsets/common.py b/pandas/tests/tseries/offsets/common.py
index 25837c0b6aee2..b2ac28e1865d6 100644
--- a/pandas/tests/tseries/offsets/common.py
+++ b/pandas/tests/tseries/offsets/common.py
@@ -1,6 +1,24 @@
"""
-Assertion helpers for offsets tests
+Assertion helpers and base class for offsets tests
"""
+from datetime import datetime
+from typing import Optional, Type
+
+from dateutil.tz.tz import tzlocal
+import pytest
+
+from pandas._libs.tslibs import OutOfBoundsDatetime, Timestamp
+from pandas._libs.tslibs.offsets import (
+ FY5253,
+ BusinessHour,
+ CustomBusinessHour,
+ DateOffset,
+ FY5253Quarter,
+ LastWeekOfMonth,
+ Week,
+ WeekOfMonth,
+)
+from pandas.compat import IS64
def assert_offset_equal(offset, base, expected):
@@ -24,3 +42,156 @@ def assert_is_on_offset(offset, date, expected):
f"\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset})"
f"\nAt Date: {date}"
)
+
+
+class WeekDay:
+ MON = 0
+ TUE = 1
+ WED = 2
+ THU = 3
+ FRI = 4
+ SAT = 5
+ SUN = 6
+
+
+class Base:
+ _offset: Optional[Type[DateOffset]] = None
+ d = Timestamp(datetime(2008, 1, 2))
+
+ timezones = [
+ None,
+ "UTC",
+ "Asia/Tokyo",
+ "US/Eastern",
+ "dateutil/Asia/Tokyo",
+ "dateutil/US/Pacific",
+ ]
+
+ def _get_offset(self, klass, value=1, normalize=False):
+ # create instance from offset class
+ if klass is FY5253:
+ klass = klass(
+ n=value,
+ startingMonth=1,
+ weekday=1,
+ variation="last",
+ normalize=normalize,
+ )
+ elif klass is FY5253Quarter:
+ klass = klass(
+ n=value,
+ startingMonth=1,
+ weekday=1,
+ qtr_with_extra_week=1,
+ variation="last",
+ normalize=normalize,
+ )
+ elif klass is LastWeekOfMonth:
+ klass = klass(n=value, weekday=5, normalize=normalize)
+ elif klass is WeekOfMonth:
+ klass = klass(n=value, week=1, weekday=5, normalize=normalize)
+ elif klass is Week:
+ klass = klass(n=value, weekday=5, normalize=normalize)
+ elif klass is DateOffset:
+ klass = klass(days=value, normalize=normalize)
+ else:
+ klass = klass(value, normalize=normalize)
+ return klass
+
+ def test_apply_out_of_range(self, tz_naive_fixture):
+ tz = tz_naive_fixture
+ if self._offset is None:
+ return
+ if isinstance(tz, tzlocal) and not IS64:
+ pytest.xfail(reason="OverflowError inside tzlocal past 2038")
+
+ # try to create an out-of-bounds result timestamp; if we can't create
+ # the offset skip
+ try:
+ if self._offset in (BusinessHour, CustomBusinessHour):
+ # Using 10000 in BusinessHour fails in tz check because of DST
+ # difference
+ offset = self._get_offset(self._offset, value=100000)
+ else:
+ offset = self._get_offset(self._offset, value=10000)
+
+ result = Timestamp("20080101") + offset
+ assert isinstance(result, datetime)
+ assert result.tzinfo is None
+
+ # Check tz is preserved
+ t = Timestamp("20080101", tz=tz)
+ result = t + offset
+ assert isinstance(result, datetime)
+ assert t.tzinfo == result.tzinfo
+
+ except OutOfBoundsDatetime:
+ pass
+ except (ValueError, KeyError):
+ # we are creating an invalid offset
+ # so ignore
+ pass
+
+ def test_offsets_compare_equal(self):
+ # root cause of GH#456: __ne__ was not implemented
+ if self._offset is None:
+ return
+ offset1 = self._offset()
+ offset2 = self._offset()
+ assert not offset1 != offset2
+ assert offset1 == offset2
+
+ def test_rsub(self):
+ if self._offset is None or not hasattr(self, "offset2"):
+ # i.e. skip for TestCommon and YQM subclasses that do not have
+ # offset2 attr
+ return
+ assert self.d - self.offset2 == (-self.offset2).apply(self.d)
+
+ def test_radd(self):
+ if self._offset is None or not hasattr(self, "offset2"):
+ # i.e. skip for TestCommon and YQM subclasses that do not have
+ # offset2 attr
+ return
+ assert self.d + self.offset2 == self.offset2 + self.d
+
+ def test_sub(self):
+ if self._offset is None or not hasattr(self, "offset2"):
+ # i.e. skip for TestCommon and YQM subclasses that do not have
+ # offset2 attr
+ return
+ off = self.offset2
+ msg = "Cannot subtract datetime from offset"
+ with pytest.raises(TypeError, match=msg):
+ off - self.d
+
+ assert 2 * off - off == off
+ assert self.d - self.offset2 == self.d + self._offset(-2)
+ assert self.d - self.offset2 == self.d - (2 * off - off)
+
+ def testMult1(self):
+ if self._offset is None or not hasattr(self, "offset1"):
+ # i.e. skip for TestCommon and YQM subclasses that do not have
+ # offset1 attr
+ return
+ assert self.d + 10 * self.offset1 == self.d + self._offset(10)
+ assert self.d + 5 * self.offset1 == self.d + self._offset(5)
+
+ def testMult2(self):
+ if self._offset is None:
+ return
+ assert self.d + (-5 * self._offset(-10)) == self.d + self._offset(50)
+ assert self.d + (-3 * self._offset(-2)) == self.d + self._offset(6)
+
+ def test_compare_str(self):
+ # GH#23524
+ # comparing to strings that cannot be cast to DateOffsets should
+ # not raise for __eq__ or __ne__
+ if self._offset is None:
+ return
+ off = self._get_offset(self._offset)
+
+ assert not off == "infer"
+ assert off != "foo"
+ # Note: inequalities are only implemented for Tick subclasses;
+ # tests for this are in test_ticks
diff --git a/pandas/tests/tseries/offsets/test_business_day.py b/pandas/tests/tseries/offsets/test_business_day.py
new file mode 100644
index 0000000000000..d3c4fb50e2ab0
--- /dev/null
+++ b/pandas/tests/tseries/offsets/test_business_day.py
@@ -0,0 +1,441 @@
+"""
+Tests for offsets.BDay
+"""
+from datetime import date, datetime, timedelta
+
+import numpy as np
+import pytest
+
+from pandas._libs.tslibs.offsets import ApplyTypeError, BDay, BMonthEnd, CDay
+from pandas.compat.numpy import np_datetime64_compat
+
+from pandas import DatetimeIndex, _testing as tm, read_pickle
+from pandas.tests.tseries.offsets.common import (
+ Base,
+ assert_is_on_offset,
+ assert_offset_equal,
+)
+from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
+
+from pandas.tseries import offsets as offsets
+from pandas.tseries.holiday import USFederalHolidayCalendar
+
+
+class TestBusinessDay(Base):
+ _offset = BDay
+
+ def setup_method(self, method):
+ self.d = datetime(2008, 1, 1)
+
+ self.offset = BDay()
+ self.offset1 = self.offset
+ self.offset2 = BDay(2)
+
+ def test_different_normalize_equals(self):
+ # GH#21404 changed __eq__ to return False when `normalize` does not match
+ offset = self._offset()
+ offset2 = self._offset(normalize=True)
+ assert offset != offset2
+
+ def test_repr(self):
+ assert repr(self.offset) == "<BusinessDay>"
+ assert repr(self.offset2) == "<2 * BusinessDays>"
+
+ expected = "<BusinessDay: offset=datetime.timedelta(days=1)>"
+ assert repr(self.offset + timedelta(1)) == expected
+
+ def test_with_offset(self):
+ offset = self.offset + timedelta(hours=2)
+
+ assert (self.d + offset) == datetime(2008, 1, 2, 2)
+
+ def test_with_offset_index(self):
+ dti = DatetimeIndex([self.d])
+ result = dti + (self.offset + timedelta(hours=2))
+
+ expected = DatetimeIndex([datetime(2008, 1, 2, 2)])
+ tm.assert_index_equal(result, expected)
+
+ def test_eq(self):
+ assert self.offset2 == self.offset2
+
+ def test_mul(self):
+ pass
+
+ def test_hash(self):
+ assert hash(self.offset2) == hash(self.offset2)
+
+ def test_call(self):
+ with tm.assert_produces_warning(FutureWarning):
+ # GH#34171 DateOffset.__call__ is deprecated
+ assert self.offset2(self.d) == datetime(2008, 1, 3)
+
+ def testRollback1(self):
+ assert BDay(10).rollback(self.d) == self.d
+
+ def testRollback2(self):
+ assert BDay(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)
+
+ def testRollforward1(self):
+ assert BDay(10).rollforward(self.d) == self.d
+
+ def testRollforward2(self):
+ assert BDay(10).rollforward(datetime(2008, 1, 5)) == datetime(2008, 1, 7)
+
+ def test_roll_date_object(self):
+ offset = BDay()
+
+ dt = date(2012, 9, 15)
+
+ result = offset.rollback(dt)
+ assert result == datetime(2012, 9, 14)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2012, 9, 17)
+
+ offset = offsets.Day()
+ result = offset.rollback(dt)
+ assert result == datetime(2012, 9, 15)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2012, 9, 15)
+
+ def test_is_on_offset(self):
+ tests = [
+ (BDay(), datetime(2008, 1, 1), True),
+ (BDay(), datetime(2008, 1, 5), False),
+ ]
+
+ for offset, d, expected in tests:
+ assert_is_on_offset(offset, d, expected)
+
+ apply_cases: _ApplyCases = [
+ (
+ BDay(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 2),
+ datetime(2008, 1, 4): datetime(2008, 1, 7),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 8),
+ },
+ ),
+ (
+ 2 * BDay(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 3),
+ datetime(2008, 1, 4): datetime(2008, 1, 8),
+ datetime(2008, 1, 5): datetime(2008, 1, 8),
+ datetime(2008, 1, 6): datetime(2008, 1, 8),
+ datetime(2008, 1, 7): datetime(2008, 1, 9),
+ },
+ ),
+ (
+ -BDay(),
+ {
+ datetime(2008, 1, 1): datetime(2007, 12, 31),
+ datetime(2008, 1, 4): datetime(2008, 1, 3),
+ datetime(2008, 1, 5): datetime(2008, 1, 4),
+ datetime(2008, 1, 6): datetime(2008, 1, 4),
+ datetime(2008, 1, 7): datetime(2008, 1, 4),
+ datetime(2008, 1, 8): datetime(2008, 1, 7),
+ },
+ ),
+ (
+ -2 * BDay(),
+ {
+ datetime(2008, 1, 1): datetime(2007, 12, 28),
+ datetime(2008, 1, 4): datetime(2008, 1, 2),
+ datetime(2008, 1, 5): datetime(2008, 1, 3),
+ datetime(2008, 1, 6): datetime(2008, 1, 3),
+ datetime(2008, 1, 7): datetime(2008, 1, 3),
+ datetime(2008, 1, 8): datetime(2008, 1, 4),
+ datetime(2008, 1, 9): datetime(2008, 1, 7),
+ },
+ ),
+ (
+ BDay(0),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 1, 4): datetime(2008, 1, 4),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 7),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("case", apply_cases)
+ def test_apply(self, case):
+ offset, cases = case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ def test_apply_large_n(self):
+ dt = datetime(2012, 10, 23)
+
+ result = dt + BDay(10)
+ assert result == datetime(2012, 11, 6)
+
+ result = dt + BDay(100) - BDay(100)
+ assert result == dt
+
+ off = BDay() * 6
+ rs = datetime(2012, 1, 1) - off
+ xp = datetime(2011, 12, 23)
+ assert rs == xp
+
+ st = datetime(2011, 12, 18)
+ rs = st + off
+ xp = datetime(2011, 12, 26)
+ assert rs == xp
+
+ off = BDay() * 10
+ rs = datetime(2014, 1, 5) + off # see #5890
+ xp = datetime(2014, 1, 17)
+ assert rs == xp
+
+ def test_apply_corner(self):
+ msg = "Only know how to combine business day with datetime or timedelta"
+ with pytest.raises(ApplyTypeError, match=msg):
+ BDay().apply(BMonthEnd())
+
+
+class TestCustomBusinessDay(Base):
+ _offset = CDay
+
+ def setup_method(self, method):
+ self.d = datetime(2008, 1, 1)
+ self.nd = np_datetime64_compat("2008-01-01 00:00:00Z")
+
+ self.offset = CDay()
+ self.offset1 = self.offset
+ self.offset2 = CDay(2)
+
+ def test_different_normalize_equals(self):
+ # GH#21404 changed __eq__ to return False when `normalize` does not match
+ offset = self._offset()
+ offset2 = self._offset(normalize=True)
+ assert offset != offset2
+
+ def test_repr(self):
+ assert repr(self.offset) == "<CustomBusinessDay>"
+ assert repr(self.offset2) == "<2 * CustomBusinessDays>"
+
+ expected = "<BusinessDay: offset=datetime.timedelta(days=1)>"
+ assert repr(self.offset + timedelta(1)) == expected
+
+ def test_with_offset(self):
+ offset = self.offset + timedelta(hours=2)
+
+ assert (self.d + offset) == datetime(2008, 1, 2, 2)
+
+ def test_with_offset_index(self):
+ dti = DatetimeIndex([self.d])
+ result = dti + (self.offset + timedelta(hours=2))
+
+ expected = DatetimeIndex([datetime(2008, 1, 2, 2)])
+ tm.assert_index_equal(result, expected)
+
+ def test_eq(self):
+ assert self.offset2 == self.offset2
+
+ def test_mul(self):
+ pass
+
+ def test_hash(self):
+ assert hash(self.offset2) == hash(self.offset2)
+
+ def test_call(self):
+ with tm.assert_produces_warning(FutureWarning):
+ # GH#34171 DateOffset.__call__ is deprecated
+ assert self.offset2(self.d) == datetime(2008, 1, 3)
+ assert self.offset2(self.nd) == datetime(2008, 1, 3)
+
+ def testRollback1(self):
+ assert CDay(10).rollback(self.d) == self.d
+
+ def testRollback2(self):
+ assert CDay(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)
+
+ def testRollforward1(self):
+ assert CDay(10).rollforward(self.d) == self.d
+
+ def testRollforward2(self):
+ assert CDay(10).rollforward(datetime(2008, 1, 5)) == datetime(2008, 1, 7)
+
+ def test_roll_date_object(self):
+ offset = CDay()
+
+ dt = date(2012, 9, 15)
+
+ result = offset.rollback(dt)
+ assert result == datetime(2012, 9, 14)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2012, 9, 17)
+
+ offset = offsets.Day()
+ result = offset.rollback(dt)
+ assert result == datetime(2012, 9, 15)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2012, 9, 15)
+
+ on_offset_cases = [
+ (CDay(), datetime(2008, 1, 1), True),
+ (CDay(), datetime(2008, 1, 5), False),
+ ]
+
+ @pytest.mark.parametrize("case", on_offset_cases)
+ def test_is_on_offset(self, case):
+ offset, d, expected = case
+ assert_is_on_offset(offset, d, expected)
+
+ apply_cases: _ApplyCases = [
+ (
+ CDay(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 2),
+ datetime(2008, 1, 4): datetime(2008, 1, 7),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 8),
+ },
+ ),
+ (
+ 2 * CDay(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 3),
+ datetime(2008, 1, 4): datetime(2008, 1, 8),
+ datetime(2008, 1, 5): datetime(2008, 1, 8),
+ datetime(2008, 1, 6): datetime(2008, 1, 8),
+ datetime(2008, 1, 7): datetime(2008, 1, 9),
+ },
+ ),
+ (
+ -CDay(),
+ {
+ datetime(2008, 1, 1): datetime(2007, 12, 31),
+ datetime(2008, 1, 4): datetime(2008, 1, 3),
+ datetime(2008, 1, 5): datetime(2008, 1, 4),
+ datetime(2008, 1, 6): datetime(2008, 1, 4),
+ datetime(2008, 1, 7): datetime(2008, 1, 4),
+ datetime(2008, 1, 8): datetime(2008, 1, 7),
+ },
+ ),
+ (
+ -2 * CDay(),
+ {
+ datetime(2008, 1, 1): datetime(2007, 12, 28),
+ datetime(2008, 1, 4): datetime(2008, 1, 2),
+ datetime(2008, 1, 5): datetime(2008, 1, 3),
+ datetime(2008, 1, 6): datetime(2008, 1, 3),
+ datetime(2008, 1, 7): datetime(2008, 1, 3),
+ datetime(2008, 1, 8): datetime(2008, 1, 4),
+ datetime(2008, 1, 9): datetime(2008, 1, 7),
+ },
+ ),
+ (
+ CDay(0),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 1, 4): datetime(2008, 1, 4),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 7),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("case", apply_cases)
+ def test_apply(self, case):
+ offset, cases = case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ def test_apply_large_n(self):
+ dt = datetime(2012, 10, 23)
+
+ result = dt + CDay(10)
+ assert result == datetime(2012, 11, 6)
+
+ result = dt + CDay(100) - CDay(100)
+ assert result == dt
+
+ off = CDay() * 6
+ rs = datetime(2012, 1, 1) - off
+ xp = datetime(2011, 12, 23)
+ assert rs == xp
+
+ st = datetime(2011, 12, 18)
+ rs = st + off
+ xp = datetime(2011, 12, 26)
+ assert rs == xp
+
+ def test_apply_corner(self):
+ msg = (
+ "Only know how to combine trading day "
+ "with datetime, datetime64 or timedelta"
+ )
+ with pytest.raises(ApplyTypeError, match=msg):
+ CDay().apply(BMonthEnd())
+
+ def test_holidays(self):
+ # Define a TradingDay offset
+ holidays = ["2012-05-01", datetime(2013, 5, 1), np.datetime64("2014-05-01")]
+ tday = CDay(holidays=holidays)
+ for year in range(2012, 2015):
+ dt = datetime(year, 4, 30)
+ xp = datetime(year, 5, 2)
+ rs = dt + tday
+ assert rs == xp
+
+ def test_weekmask(self):
+ weekmask_saudi = "Sat Sun Mon Tue Wed" # Thu-Fri Weekend
+ weekmask_uae = "1111001" # Fri-Sat Weekend
+ weekmask_egypt = [1, 1, 1, 1, 0, 0, 1] # Fri-Sat Weekend
+ bday_saudi = CDay(weekmask=weekmask_saudi)
+ bday_uae = CDay(weekmask=weekmask_uae)
+ bday_egypt = CDay(weekmask=weekmask_egypt)
+ dt = datetime(2013, 5, 1)
+ xp_saudi = datetime(2013, 5, 4)
+ xp_uae = datetime(2013, 5, 2)
+ xp_egypt = datetime(2013, 5, 2)
+ assert xp_saudi == dt + bday_saudi
+ assert xp_uae == dt + bday_uae
+ assert xp_egypt == dt + bday_egypt
+ xp2 = datetime(2013, 5, 5)
+ assert xp2 == dt + 2 * bday_saudi
+ assert xp2 == dt + 2 * bday_uae
+ assert xp2 == dt + 2 * bday_egypt
+
+ def test_weekmask_and_holidays(self):
+ weekmask_egypt = "Sun Mon Tue Wed Thu" # Fri-Sat Weekend
+ holidays = ["2012-05-01", datetime(2013, 5, 1), np.datetime64("2014-05-01")]
+ bday_egypt = CDay(holidays=holidays, weekmask=weekmask_egypt)
+ dt = datetime(2013, 4, 30)
+ xp_egypt = datetime(2013, 5, 5)
+ assert xp_egypt == dt + 2 * bday_egypt
+
+ @pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
+ def test_calendar(self):
+ calendar = USFederalHolidayCalendar()
+ dt = datetime(2014, 1, 17)
+ assert_offset_equal(CDay(calendar=calendar), dt, datetime(2014, 1, 21))
+
+ def test_roundtrip_pickle(self):
+ def _check_roundtrip(obj):
+ unpickled = tm.round_trip_pickle(obj)
+ assert unpickled == obj
+
+ _check_roundtrip(self.offset)
+ _check_roundtrip(self.offset2)
+ _check_roundtrip(self.offset * 2)
+
+ def test_pickle_compat_0_14_1(self, datapath):
+ hdays = [datetime(2013, 1, 1) for ele in range(4)]
+ pth = datapath("tseries", "offsets", "data", "cday-0.14.1.pickle")
+ cday0_14_1 = read_pickle(pth)
+ cday = CDay(holidays=hdays)
+ assert cday == cday0_14_1
diff --git a/pandas/tests/tseries/offsets/test_business_hour.py b/pandas/tests/tseries/offsets/test_business_hour.py
new file mode 100644
index 0000000000000..5f387b2edeb0b
--- /dev/null
+++ b/pandas/tests/tseries/offsets/test_business_hour.py
@@ -0,0 +1,905 @@
+"""
+Tests for offsets.BusinessHour
+"""
+from datetime import datetime, time as dt_time
+
+import pytest
+
+from pandas._libs.tslibs import Timedelta, Timestamp
+from pandas._libs.tslibs.offsets import BDay, BusinessHour, Nano
+
+from pandas import DatetimeIndex, _testing as tm, date_range
+from pandas.tests.tseries.offsets.common import Base, assert_offset_equal
+
+
+class TestBusinessHour(Base):
+ _offset = BusinessHour
+
+ def setup_method(self, method):
+ self.d = datetime(2014, 7, 1, 10, 00)
+
+ self.offset1 = BusinessHour()
+ self.offset2 = BusinessHour(n=3)
+
+ self.offset3 = BusinessHour(n=-1)
+ self.offset4 = BusinessHour(n=-4)
+
+ from datetime import time as dt_time
+
+ self.offset5 = BusinessHour(start=dt_time(11, 0), end=dt_time(14, 30))
+ self.offset6 = BusinessHour(start="20:00", end="05:00")
+ self.offset7 = BusinessHour(n=-2, start=dt_time(21, 30), end=dt_time(6, 30))
+ self.offset8 = BusinessHour(start=["09:00", "13:00"], end=["12:00", "17:00"])
+ self.offset9 = BusinessHour(
+ n=3, start=["09:00", "22:00"], end=["13:00", "03:00"]
+ )
+ self.offset10 = BusinessHour(
+ n=-1, start=["23:00", "13:00"], end=["02:00", "17:00"]
+ )
+
+ @pytest.mark.parametrize(
+ "start,end,match",
+ [
+ (
+ dt_time(11, 0, 5),
+ "17:00",
+ "time data must be specified only with hour and minute",
+ ),
+ ("AAA", "17:00", "time data must match '%H:%M' format"),
+ ("14:00:05", "17:00", "time data must match '%H:%M' format"),
+ ([], "17:00", "Must include at least 1 start time"),
+ ("09:00", [], "Must include at least 1 end time"),
+ (
+ ["09:00", "11:00"],
+ "17:00",
+ "number of starting time and ending time must be the same",
+ ),
+ (
+ ["09:00", "11:00"],
+ ["10:00"],
+ "number of starting time and ending time must be the same",
+ ),
+ (
+ ["09:00", "11:00"],
+ ["12:00", "20:00"],
+ r"invalid starting and ending time\(s\): opening hours should not "
+ "touch or overlap with one another",
+ ),
+ (
+ ["12:00", "20:00"],
+ ["09:00", "11:00"],
+ r"invalid starting and ending time\(s\): opening hours should not "
+ "touch or overlap with one another",
+ ),
+ ],
+ )
+ def test_constructor_errors(self, start, end, match):
+ with pytest.raises(ValueError, match=match):
+ BusinessHour(start=start, end=end)
+
+ def test_different_normalize_equals(self):
+ # GH#21404 changed __eq__ to return False when `normalize` does not match
+ offset = self._offset()
+ offset2 = self._offset(normalize=True)
+ assert offset != offset2
+
+ def test_repr(self):
+ assert repr(self.offset1) == "<BusinessHour: BH=09:00-17:00>"
+ assert repr(self.offset2) == "<3 * BusinessHours: BH=09:00-17:00>"
+ assert repr(self.offset3) == "<-1 * BusinessHour: BH=09:00-17:00>"
+ assert repr(self.offset4) == "<-4 * BusinessHours: BH=09:00-17:00>"
+
+ assert repr(self.offset5) == "<BusinessHour: BH=11:00-14:30>"
+ assert repr(self.offset6) == "<BusinessHour: BH=20:00-05:00>"
+ assert repr(self.offset7) == "<-2 * BusinessHours: BH=21:30-06:30>"
+ assert repr(self.offset8) == "<BusinessHour: BH=09:00-12:00,13:00-17:00>"
+ assert repr(self.offset9) == "<3 * BusinessHours: BH=09:00-13:00,22:00-03:00>"
+ assert repr(self.offset10) == "<-1 * BusinessHour: BH=13:00-17:00,23:00-02:00>"
+
+ def test_with_offset(self):
+ expected = Timestamp("2014-07-01 13:00")
+
+ assert self.d + BusinessHour() * 3 == expected
+ assert self.d + BusinessHour(n=3) == expected
+
+ @pytest.mark.parametrize(
+ "offset_name",
+ ["offset1", "offset2", "offset3", "offset4", "offset8", "offset9", "offset10"],
+ )
+ def test_eq_attribute(self, offset_name):
+ offset = getattr(self, offset_name)
+ assert offset == offset
+
+ @pytest.mark.parametrize(
+ "offset1,offset2",
+ [
+ (BusinessHour(start="09:00"), BusinessHour()),
+ (
+ BusinessHour(start=["23:00", "13:00"], end=["12:00", "17:00"]),
+ BusinessHour(start=["13:00", "23:00"], end=["17:00", "12:00"]),
+ ),
+ ],
+ )
+ def test_eq(self, offset1, offset2):
+ assert offset1 == offset2
+
+ @pytest.mark.parametrize(
+ "offset1,offset2",
+ [
+ (BusinessHour(), BusinessHour(-1)),
+ (BusinessHour(start="09:00"), BusinessHour(start="09:01")),
+ (
+ BusinessHour(start="09:00", end="17:00"),
+ BusinessHour(start="17:00", end="09:01"),
+ ),
+ (
+ BusinessHour(start=["13:00", "23:00"], end=["18:00", "07:00"]),
+ BusinessHour(start=["13:00", "23:00"], end=["17:00", "12:00"]),
+ ),
+ ],
+ )
+ def test_neq(self, offset1, offset2):
+ assert offset1 != offset2
+
+ @pytest.mark.parametrize(
+ "offset_name",
+ ["offset1", "offset2", "offset3", "offset4", "offset8", "offset9", "offset10"],
+ )
+ def test_hash(self, offset_name):
+ offset = getattr(self, offset_name)
+ assert offset == offset
+
+ def test_call(self):
+ with tm.assert_produces_warning(FutureWarning):
+ # GH#34171 DateOffset.__call__ is deprecated
+ assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
+ assert self.offset2(self.d) == datetime(2014, 7, 1, 13)
+ assert self.offset3(self.d) == datetime(2014, 6, 30, 17)
+ assert self.offset4(self.d) == datetime(2014, 6, 30, 14)
+ assert self.offset8(self.d) == datetime(2014, 7, 1, 11)
+ assert self.offset9(self.d) == datetime(2014, 7, 1, 22)
+ assert self.offset10(self.d) == datetime(2014, 7, 1, 1)
+
+ def test_sub(self):
+ # we have to override test_sub here because self.offset2 is not
+ # defined as self._offset(2)
+ off = self.offset2
+ msg = "Cannot subtract datetime from offset"
+ with pytest.raises(TypeError, match=msg):
+ off - self.d
+ assert 2 * off - off == off
+
+ assert self.d - self.offset2 == self.d + self._offset(-3)
+
+ def testRollback1(self):
+ assert self.offset1.rollback(self.d) == self.d
+ assert self.offset2.rollback(self.d) == self.d
+ assert self.offset3.rollback(self.d) == self.d
+ assert self.offset4.rollback(self.d) == self.d
+ assert self.offset5.rollback(self.d) == datetime(2014, 6, 30, 14, 30)
+ assert self.offset6.rollback(self.d) == datetime(2014, 7, 1, 5, 0)
+ assert self.offset7.rollback(self.d) == datetime(2014, 7, 1, 6, 30)
+ assert self.offset8.rollback(self.d) == self.d
+ assert self.offset9.rollback(self.d) == self.d
+ assert self.offset10.rollback(self.d) == datetime(2014, 7, 1, 2)
+
+ d = datetime(2014, 7, 1, 0)
+ assert self.offset1.rollback(d) == datetime(2014, 6, 30, 17)
+ assert self.offset2.rollback(d) == datetime(2014, 6, 30, 17)
+ assert self.offset3.rollback(d) == datetime(2014, 6, 30, 17)
+ assert self.offset4.rollback(d) == datetime(2014, 6, 30, 17)
+ assert self.offset5.rollback(d) == datetime(2014, 6, 30, 14, 30)
+ assert self.offset6.rollback(d) == d
+ assert self.offset7.rollback(d) == d
+ assert self.offset8.rollback(d) == datetime(2014, 6, 30, 17)
+ assert self.offset9.rollback(d) == d
+ assert self.offset10.rollback(d) == d
+
+ assert self._offset(5).rollback(self.d) == self.d
+
+ def testRollback2(self):
+ assert self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) == datetime(
+ 2014, 7, 4, 17, 0
+ )
+
+ def testRollforward1(self):
+ assert self.offset1.rollforward(self.d) == self.d
+ assert self.offset2.rollforward(self.d) == self.d
+ assert self.offset3.rollforward(self.d) == self.d
+ assert self.offset4.rollforward(self.d) == self.d
+ assert self.offset5.rollforward(self.d) == datetime(2014, 7, 1, 11, 0)
+ assert self.offset6.rollforward(self.d) == datetime(2014, 7, 1, 20, 0)
+ assert self.offset7.rollforward(self.d) == datetime(2014, 7, 1, 21, 30)
+ assert self.offset8.rollforward(self.d) == self.d
+ assert self.offset9.rollforward(self.d) == self.d
+ assert self.offset10.rollforward(self.d) == datetime(2014, 7, 1, 13)
+
+ d = datetime(2014, 7, 1, 0)
+ assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
+ assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
+ assert self.offset3.rollforward(d) == datetime(2014, 7, 1, 9)
+ assert self.offset4.rollforward(d) == datetime(2014, 7, 1, 9)
+ assert self.offset5.rollforward(d) == datetime(2014, 7, 1, 11)
+ assert self.offset6.rollforward(d) == d
+ assert self.offset7.rollforward(d) == d
+ assert self.offset8.rollforward(d) == datetime(2014, 7, 1, 9)
+ assert self.offset9.rollforward(d) == d
+ assert self.offset10.rollforward(d) == d
+
+ assert self._offset(5).rollforward(self.d) == self.d
+
+ def testRollforward2(self):
+ assert self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) == datetime(
+ 2014, 7, 7, 9
+ )
+
+ def test_roll_date_object(self):
+ offset = BusinessHour()
+
+ dt = datetime(2014, 7, 6, 15, 0)
+
+ result = offset.rollback(dt)
+ assert result == datetime(2014, 7, 4, 17)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2014, 7, 7, 9)
+
+ normalize_cases = []
+ normalize_cases.append(
+ (
+ BusinessHour(normalize=True),
+ {
+ datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 2),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
+ datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
+ datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
+ datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
+ datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
+ datetime(2014, 7, 6, 10): datetime(2014, 7, 7),
+ },
+ )
+ )
+
+ normalize_cases.append(
+ (
+ BusinessHour(-1, normalize=True),
+ {
+ datetime(2014, 7, 1, 8): datetime(2014, 6, 30),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 10): datetime(2014, 6, 30),
+ datetime(2014, 7, 1, 0): datetime(2014, 6, 30),
+ datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
+ datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
+ datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
+ datetime(2014, 7, 6, 10): datetime(2014, 7, 4),
+ },
+ )
+ )
+
+ normalize_cases.append(
+ (
+ BusinessHour(1, normalize=True, start="17:00", end="04:00"),
+ {
+ datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
+ datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
+ datetime(2014, 7, 2, 3): datetime(2014, 7, 2),
+ datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
+ datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
+ datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
+ datetime(2014, 7, 7, 17): datetime(2014, 7, 7),
+ },
+ )
+ )
+
+ @pytest.mark.parametrize("case", normalize_cases)
+ def test_normalize(self, case):
+ offset, cases = case
+ for dt, expected in cases.items():
+ assert offset.apply(dt) == expected
+
+ on_offset_cases = []
+ on_offset_cases.append(
+ (
+ BusinessHour(),
+ {
+ datetime(2014, 7, 1, 9): True,
+ datetime(2014, 7, 1, 8, 59): False,
+ datetime(2014, 7, 1, 8): False,
+ datetime(2014, 7, 1, 17): True,
+ datetime(2014, 7, 1, 17, 1): False,
+ datetime(2014, 7, 1, 18): False,
+ datetime(2014, 7, 5, 9): False,
+ datetime(2014, 7, 6, 12): False,
+ },
+ )
+ )
+
+ on_offset_cases.append(
+ (
+ BusinessHour(start="10:00", end="15:00"),
+ {
+ datetime(2014, 7, 1, 9): False,
+ datetime(2014, 7, 1, 10): True,
+ datetime(2014, 7, 1, 15): True,
+ datetime(2014, 7, 1, 15, 1): False,
+ datetime(2014, 7, 5, 12): False,
+ datetime(2014, 7, 6, 12): False,
+ },
+ )
+ )
+
+ on_offset_cases.append(
+ (
+ BusinessHour(start="19:00", end="05:00"),
+ {
+ datetime(2014, 7, 1, 9, 0): False,
+ datetime(2014, 7, 1, 10, 0): False,
+ datetime(2014, 7, 1, 15): False,
+ datetime(2014, 7, 1, 15, 1): False,
+ datetime(2014, 7, 5, 12, 0): False,
+ datetime(2014, 7, 6, 12, 0): False,
+ datetime(2014, 7, 1, 19, 0): True,
+ datetime(2014, 7, 2, 0, 0): True,
+ datetime(2014, 7, 4, 23): True,
+ datetime(2014, 7, 5, 1): True,
+ datetime(2014, 7, 5, 5, 0): True,
+ datetime(2014, 7, 6, 23, 0): False,
+ datetime(2014, 7, 7, 3, 0): False,
+ },
+ )
+ )
+
+ on_offset_cases.append(
+ (
+ BusinessHour(start=["09:00", "13:00"], end=["12:00", "17:00"]),
+ {
+ datetime(2014, 7, 1, 9): True,
+ datetime(2014, 7, 1, 8, 59): False,
+ datetime(2014, 7, 1, 8): False,
+ datetime(2014, 7, 1, 17): True,
+ datetime(2014, 7, 1, 17, 1): False,
+ datetime(2014, 7, 1, 18): False,
+ datetime(2014, 7, 5, 9): False,
+ datetime(2014, 7, 6, 12): False,
+ datetime(2014, 7, 1, 12, 30): False,
+ },
+ )
+ )
+
+ on_offset_cases.append(
+ (
+ BusinessHour(start=["19:00", "23:00"], end=["21:00", "05:00"]),
+ {
+ datetime(2014, 7, 1, 9, 0): False,
+ datetime(2014, 7, 1, 10, 0): False,
+ datetime(2014, 7, 1, 15): False,
+ datetime(2014, 7, 1, 15, 1): False,
+ datetime(2014, 7, 5, 12, 0): False,
+ datetime(2014, 7, 6, 12, 0): False,
+ datetime(2014, 7, 1, 19, 0): True,
+ datetime(2014, 7, 2, 0, 0): True,
+ datetime(2014, 7, 4, 23): True,
+ datetime(2014, 7, 5, 1): True,
+ datetime(2014, 7, 5, 5, 0): True,
+ datetime(2014, 7, 6, 23, 0): False,
+ datetime(2014, 7, 7, 3, 0): False,
+ datetime(2014, 7, 4, 22): False,
+ },
+ )
+ )
+
+ @pytest.mark.parametrize("case", on_offset_cases)
+ def test_is_on_offset(self, case):
+ offset, cases = case
+ for dt, expected in cases.items():
+ assert offset.is_on_offset(dt) == expected
+
+ apply_cases = [
+ (
+ BusinessHour(),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 2, 9, 30, 15),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 10),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 12),
+ # out of business hours
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
+ # saturday
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
+ datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(4),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 11),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 12),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
+ datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(-1),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 10),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 12),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 1, 10): datetime(2014, 6, 30, 17),
+ datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
+ datetime(2014, 7, 1, 9, 30, 15): datetime(2014, 6, 30, 16, 30, 15),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 1, 5): datetime(2014, 6, 30, 16),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 10),
+ # out of business hours
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 16),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 16),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 16),
+ # saturday
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 16),
+ datetime(2014, 7, 7, 9): datetime(2014, 7, 4, 16),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 16, 30),
+ datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 16, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(-4),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 15),
+ datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 11),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 12),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
+ datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 13),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 13, 30),
+ datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 13, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(start="13:00", end="16:00"),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 14),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 14),
+ datetime(2014, 7, 1, 15, 30, 15): datetime(2014, 7, 2, 13, 30, 15),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 14),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14),
+ },
+ ),
+ (
+ BusinessHour(n=2, start="13:00", end="16:00"),
+ {
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
+ datetime(2014, 7, 2, 14, 30): datetime(2014, 7, 3, 13, 30),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 15),
+ datetime(2014, 7, 4, 14, 30): datetime(2014, 7, 7, 13, 30),
+ datetime(2014, 7, 4, 14, 30, 30): datetime(2014, 7, 7, 13, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(n=-1, start="13:00", end="16:00"),
+ {
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 2, 15): datetime(2014, 7, 2, 14),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 16): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 13, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 15),
+ datetime(2014, 7, 7, 11): datetime(2014, 7, 4, 15),
+ },
+ ),
+ (
+ BusinessHour(n=-3, start="10:00", end="16:00"),
+ {
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 11),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
+ datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 2, 11, 30): datetime(2014, 7, 1, 14, 30),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 4, 10): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
+ datetime(2014, 7, 4, 16): datetime(2014, 7, 4, 13),
+ datetime(2014, 7, 4, 12, 30): datetime(2014, 7, 3, 15, 30),
+ datetime(2014, 7, 4, 12, 30, 30): datetime(2014, 7, 3, 15, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(start="19:00", end="05:00"),
+ {
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 20),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 20),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 20),
+ datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 20),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 20),
+ datetime(2014, 7, 2, 4, 30): datetime(2014, 7, 2, 19, 30),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 1),
+ datetime(2014, 7, 4, 10): datetime(2014, 7, 4, 20),
+ datetime(2014, 7, 4, 23): datetime(2014, 7, 5, 0),
+ datetime(2014, 7, 5, 0): datetime(2014, 7, 5, 1),
+ datetime(2014, 7, 5, 4): datetime(2014, 7, 7, 19),
+ datetime(2014, 7, 5, 4, 30): datetime(2014, 7, 7, 19, 30),
+ datetime(2014, 7, 5, 4, 30, 30): datetime(2014, 7, 7, 19, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(n=-1, start="19:00", end="05:00"),
+ {
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
+ datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
+ datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
+ datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
+ datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 3),
+ datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 5, 4, 30),
+ datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 5, 4, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(n=4, start="00:00", end="23:00"),
+ {
+ datetime(2014, 7, 3, 22): datetime(2014, 7, 4, 3),
+ datetime(2014, 7, 4, 22): datetime(2014, 7, 7, 3),
+ datetime(2014, 7, 3, 22, 30): datetime(2014, 7, 4, 3, 30),
+ datetime(2014, 7, 3, 22, 20): datetime(2014, 7, 4, 3, 20),
+ datetime(2014, 7, 4, 22, 30, 30): datetime(2014, 7, 7, 3, 30, 30),
+ datetime(2014, 7, 4, 22, 30, 20): datetime(2014, 7, 7, 3, 30, 20),
+ },
+ ),
+ (
+ BusinessHour(n=-4, start="00:00", end="23:00"),
+ {
+ datetime(2014, 7, 4, 3): datetime(2014, 7, 3, 22),
+ datetime(2014, 7, 7, 3): datetime(2014, 7, 4, 22),
+ datetime(2014, 7, 4, 3, 30): datetime(2014, 7, 3, 22, 30),
+ datetime(2014, 7, 4, 3, 20): datetime(2014, 7, 3, 22, 20),
+ datetime(2014, 7, 7, 3, 30, 30): datetime(2014, 7, 4, 22, 30, 30),
+ datetime(2014, 7, 7, 3, 30, 20): datetime(2014, 7, 4, 22, 30, 20),
+ },
+ ),
+ (
+ BusinessHour(start=["09:00", "14:00"], end=["12:00", "18:00"]),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 17, 30, 15),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 14),
+ # out of business hours
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
+ # saturday
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 4, 17, 30): datetime(2014, 7, 7, 9, 30),
+ datetime(2014, 7, 4, 17, 30, 30): datetime(2014, 7, 7, 9, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(n=4, start=["09:00", "14:00"], end=["12:00", "18:00"]),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 10),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 11),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 14),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 17),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 15),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 11, 30),
+ datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 11, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(n=-4, start=["09:00", "14:00"], end=["12:00", "18:00"]),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 16),
+ datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
+ datetime(2014, 7, 1, 15): datetime(2014, 6, 30, 18),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 10),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 11),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 12),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 12),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 12),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 12),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 12),
+ datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 12),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 14, 30),
+ datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 14, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(n=-1, start=["19:00", "03:00"], end=["01:00", "05:00"]),
+ {
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 4): datetime(2014, 7, 2, 1),
+ datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
+ datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
+ datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
+ datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
+ datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 0),
+ datetime(2014, 7, 7, 3, 30): datetime(2014, 7, 5, 0, 30),
+ datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 7, 4, 30),
+ datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 7, 4, 30, 30),
+ },
+ ),
+ ]
+
+    # Long business hours (gh-26381) and multiple business hours cases
+
+    # are exercised via the parametrized apply_cases list above.
+
+ @pytest.mark.parametrize("case", apply_cases)
+ def test_apply(self, case):
+ offset, cases = case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ apply_large_n_cases = [
+ (
+ # A week later
+ BusinessHour(40),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 8, 11),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 8, 13),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 8, 15),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 8, 16),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 9, 9),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 9, 11),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 9, 9),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 10, 9),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 10, 9),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 10, 9),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 9),
+ datetime(2014, 7, 4, 18): datetime(2014, 7, 14, 9),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 14, 9, 30),
+ datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 14, 9, 30, 30),
+ },
+ ),
+ (
+ # 3 days and 1 hour before
+ BusinessHour(-25),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
+ datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 12),
+ datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 16),
+ datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 17),
+ datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
+ datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 16),
+ datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 16),
+ datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 16),
+ datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 16),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 16, 30),
+ datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30),
+ },
+ ),
+ (
+ # 5 days and 3 hours later
+ BusinessHour(28, start="21:00", end="02:00"),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
+ datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 1),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
+ datetime(2014, 7, 2, 2): datetime(2014, 7, 10, 0),
+ datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
+ datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
+ datetime(2014, 7, 4, 2): datetime(2014, 7, 12, 0),
+ datetime(2014, 7, 4, 3): datetime(2014, 7, 12, 0),
+ datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 15, 0),
+ datetime(2014, 7, 6, 18): datetime(2014, 7, 15, 0),
+ datetime(2014, 7, 7, 1): datetime(2014, 7, 15, 0),
+ datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30),
+ },
+ ),
+ (
+ # large n for multiple opening hours (3 days and 1 hour before)
+ BusinessHour(n=-25, start=["09:00", "14:00"], end=["12:00", "19:00"]),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
+ datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 11),
+ datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 18),
+ datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 19),
+ datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
+ datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 18),
+ datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 18),
+ datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 18),
+ datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 18),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 18),
+ datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 18),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 18, 30),
+ datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30),
+ },
+ ),
+ (
+ # 5 days and 3 hours later
+ BusinessHour(28, start=["21:00", "03:00"], end=["01:00", "04:00"]),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
+ datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 3),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
+ datetime(2014, 7, 2, 2): datetime(2014, 7, 9, 23),
+ datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
+ datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
+ datetime(2014, 7, 4, 2): datetime(2014, 7, 11, 23),
+ datetime(2014, 7, 4, 3): datetime(2014, 7, 11, 23),
+ datetime(2014, 7, 4, 21): datetime(2014, 7, 12, 0),
+ datetime(2014, 7, 5, 0): datetime(2014, 7, 14, 22),
+ datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 23),
+ datetime(2014, 7, 6, 18): datetime(2014, 7, 14, 23),
+ datetime(2014, 7, 7, 1): datetime(2014, 7, 14, 23),
+ datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("case", apply_large_n_cases)
+ def test_apply_large_n(self, case):
+ offset, cases = case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ def test_apply_nanoseconds(self):
+ tests = [
+ (
+ BusinessHour(),
+ {
+ Timestamp("2014-07-04 15:00")
+ + Nano(5): Timestamp("2014-07-04 16:00")
+ + Nano(5),
+ Timestamp("2014-07-04 16:00")
+ + Nano(5): Timestamp("2014-07-07 09:00")
+ + Nano(5),
+ Timestamp("2014-07-04 16:00")
+ - Nano(5): Timestamp("2014-07-04 17:00")
+ - Nano(5),
+ },
+ ),
+ (
+ BusinessHour(-1),
+ {
+ Timestamp("2014-07-04 15:00")
+ + Nano(5): Timestamp("2014-07-04 14:00")
+ + Nano(5),
+ Timestamp("2014-07-04 10:00")
+ + Nano(5): Timestamp("2014-07-04 09:00")
+ + Nano(5),
+ Timestamp("2014-07-04 10:00")
+ - Nano(5): Timestamp("2014-07-03 17:00")
+ - Nano(5),
+ },
+ ),
+ ]
+
+ for offset, cases in tests:
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ def test_datetimeindex(self):
+ idx1 = date_range(start="2014-07-04 15:00", end="2014-07-08 10:00", freq="BH")
+ idx2 = date_range(start="2014-07-04 15:00", periods=12, freq="BH")
+ idx3 = date_range(end="2014-07-08 10:00", periods=12, freq="BH")
+ expected = DatetimeIndex(
+ [
+ "2014-07-04 15:00",
+ "2014-07-04 16:00",
+ "2014-07-07 09:00",
+ "2014-07-07 10:00",
+ "2014-07-07 11:00",
+ "2014-07-07 12:00",
+ "2014-07-07 13:00",
+ "2014-07-07 14:00",
+ "2014-07-07 15:00",
+ "2014-07-07 16:00",
+ "2014-07-08 09:00",
+ "2014-07-08 10:00",
+ ],
+ freq="BH",
+ )
+ for idx in [idx1, idx2, idx3]:
+ tm.assert_index_equal(idx, expected)
+
+ idx1 = date_range(start="2014-07-04 15:45", end="2014-07-08 10:45", freq="BH")
+ idx2 = date_range(start="2014-07-04 15:45", periods=12, freq="BH")
+ idx3 = date_range(end="2014-07-08 10:45", periods=12, freq="BH")
+
+ expected = idx1
+ for idx in [idx1, idx2, idx3]:
+ tm.assert_index_equal(idx, expected)
+
+ def test_bday_ignores_timedeltas(self):
+ idx = date_range("2010/02/01", "2010/02/10", freq="12H")
+ t1 = idx + BDay(offset=Timedelta(3, unit="H"))
+
+ expected = DatetimeIndex(
+ [
+ "2010-02-02 03:00:00",
+ "2010-02-02 15:00:00",
+ "2010-02-03 03:00:00",
+ "2010-02-03 15:00:00",
+ "2010-02-04 03:00:00",
+ "2010-02-04 15:00:00",
+ "2010-02-05 03:00:00",
+ "2010-02-05 15:00:00",
+ "2010-02-08 03:00:00",
+ "2010-02-08 15:00:00",
+ "2010-02-08 03:00:00",
+ "2010-02-08 15:00:00",
+ "2010-02-08 03:00:00",
+ "2010-02-08 15:00:00",
+ "2010-02-09 03:00:00",
+ "2010-02-09 15:00:00",
+ "2010-02-10 03:00:00",
+ "2010-02-10 15:00:00",
+ "2010-02-11 03:00:00",
+ ],
+ freq=None,
+ )
+ tm.assert_index_equal(t1, expected)
diff --git a/pandas/tests/tseries/offsets/test_custom_business_hour.py b/pandas/tests/tseries/offsets/test_custom_business_hour.py
new file mode 100644
index 0000000000000..f05b286616572
--- /dev/null
+++ b/pandas/tests/tseries/offsets/test_custom_business_hour.py
@@ -0,0 +1,293 @@
+"""
+Tests for offsets.CustomBusinessHour
+"""
+from datetime import datetime
+
+import numpy as np
+import pytest
+
+from pandas._libs.tslibs import Timestamp
+from pandas._libs.tslibs.offsets import BusinessHour, CustomBusinessHour, Nano
+
+import pandas._testing as tm
+from pandas.tests.tseries.offsets.common import Base, assert_offset_equal
+
+
+class TestCustomBusinessHour(Base):
+ _offset = CustomBusinessHour
+ holidays = ["2014-06-27", datetime(2014, 6, 30), np.datetime64("2014-07-02")]
+
+ def setup_method(self, method):
+ # 2014 Calendar to check custom holidays
+ # Sun Mon Tue Wed Thu Fri Sat
+ # 6/22 23 24 25 26 27 28
+ # 29 30 7/1 2 3 4 5
+ # 6 7 8 9 10 11 12
+ self.d = datetime(2014, 7, 1, 10, 00)
+ self.offset1 = CustomBusinessHour(weekmask="Tue Wed Thu Fri")
+
+ self.offset2 = CustomBusinessHour(holidays=self.holidays)
+
+ def test_constructor_errors(self):
+ from datetime import time as dt_time
+
+ msg = "time data must be specified only with hour and minute"
+ with pytest.raises(ValueError, match=msg):
+ CustomBusinessHour(start=dt_time(11, 0, 5))
+ msg = "time data must match '%H:%M' format"
+ with pytest.raises(ValueError, match=msg):
+ CustomBusinessHour(start="AAA")
+ msg = "time data must match '%H:%M' format"
+ with pytest.raises(ValueError, match=msg):
+ CustomBusinessHour(start="14:00:05")
+
+ def test_different_normalize_equals(self):
+ # GH#21404 changed __eq__ to return False when `normalize` does not match
+ offset = self._offset()
+ offset2 = self._offset(normalize=True)
+ assert offset != offset2
+
+ def test_repr(self):
+ assert repr(self.offset1) == "<CustomBusinessHour: CBH=09:00-17:00>"
+ assert repr(self.offset2) == "<CustomBusinessHour: CBH=09:00-17:00>"
+
+ def test_with_offset(self):
+ expected = Timestamp("2014-07-01 13:00")
+
+ assert self.d + CustomBusinessHour() * 3 == expected
+ assert self.d + CustomBusinessHour(n=3) == expected
+
+ def test_eq(self):
+ for offset in [self.offset1, self.offset2]:
+ assert offset == offset
+
+ assert CustomBusinessHour() != CustomBusinessHour(-1)
+ assert CustomBusinessHour(start="09:00") == CustomBusinessHour()
+ assert CustomBusinessHour(start="09:00") != CustomBusinessHour(start="09:01")
+ assert CustomBusinessHour(start="09:00", end="17:00") != CustomBusinessHour(
+ start="17:00", end="09:01"
+ )
+
+ assert CustomBusinessHour(weekmask="Tue Wed Thu Fri") != CustomBusinessHour(
+ weekmask="Mon Tue Wed Thu Fri"
+ )
+ assert CustomBusinessHour(holidays=["2014-06-27"]) != CustomBusinessHour(
+ holidays=["2014-06-28"]
+ )
+
+ def test_sub(self):
+ # override the Base.test_sub implementation because self.offset2 is
+ # defined differently in this class than the test expects
+ pass
+
+ def test_hash(self):
+ assert hash(self.offset1) == hash(self.offset1)
+ assert hash(self.offset2) == hash(self.offset2)
+
+ def test_call(self):
+ with tm.assert_produces_warning(FutureWarning):
+ # GH#34171 DateOffset.__call__ is deprecated
+ assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
+ assert self.offset2(self.d) == datetime(2014, 7, 1, 11)
+
+ def testRollback1(self):
+ assert self.offset1.rollback(self.d) == self.d
+ assert self.offset2.rollback(self.d) == self.d
+
+ d = datetime(2014, 7, 1, 0)
+
+ # 2014/07/01 is Tuesday, 06/30 is Monday(holiday)
+ assert self.offset1.rollback(d) == datetime(2014, 6, 27, 17)
+
+ # 2014/6/30 and 2014/6/27 are holidays
+ assert self.offset2.rollback(d) == datetime(2014, 6, 26, 17)
+
+ def testRollback2(self):
+ assert self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) == datetime(
+ 2014, 7, 4, 17, 0
+ )
+
+ def testRollforward1(self):
+ assert self.offset1.rollforward(self.d) == self.d
+ assert self.offset2.rollforward(self.d) == self.d
+
+ d = datetime(2014, 7, 1, 0)
+ assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
+ assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
+
+ def testRollforward2(self):
+ assert self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) == datetime(
+ 2014, 7, 7, 9
+ )
+
+ def test_roll_date_object(self):
+ offset = BusinessHour()
+
+ dt = datetime(2014, 7, 6, 15, 0)
+
+ result = offset.rollback(dt)
+ assert result == datetime(2014, 7, 4, 17)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2014, 7, 7, 9)
+
+ normalize_cases = [
+ (
+ CustomBusinessHour(normalize=True, holidays=holidays),
+ {
+ datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 3),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 3),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 3),
+ datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
+ datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
+ datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
+ datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
+ datetime(2014, 7, 6, 10): datetime(2014, 7, 7),
+ },
+ ),
+ (
+ CustomBusinessHour(-1, normalize=True, holidays=holidays),
+ {
+ datetime(2014, 7, 1, 8): datetime(2014, 6, 26),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 10): datetime(2014, 6, 26),
+ datetime(2014, 7, 1, 0): datetime(2014, 6, 26),
+ datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
+ datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
+ datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
+ datetime(2014, 7, 6, 10): datetime(2014, 7, 4),
+ },
+ ),
+ (
+ CustomBusinessHour(
+ 1, normalize=True, start="17:00", end="04:00", holidays=holidays
+ ),
+ {
+ datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
+ datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
+ datetime(2014, 7, 2, 3): datetime(2014, 7, 3),
+ datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
+ datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
+ datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
+ datetime(2014, 7, 7, 17): datetime(2014, 7, 7),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("norm_cases", normalize_cases)
+ def test_normalize(self, norm_cases):
+ offset, cases = norm_cases
+ for dt, expected in cases.items():
+ assert offset.apply(dt) == expected
+
+ def test_is_on_offset(self):
+ tests = [
+ (
+ CustomBusinessHour(start="10:00", end="15:00", holidays=self.holidays),
+ {
+ datetime(2014, 7, 1, 9): False,
+ datetime(2014, 7, 1, 10): True,
+ datetime(2014, 7, 1, 15): True,
+ datetime(2014, 7, 1, 15, 1): False,
+ datetime(2014, 7, 5, 12): False,
+ datetime(2014, 7, 6, 12): False,
+ },
+ )
+ ]
+
+ for offset, cases in tests:
+ for dt, expected in cases.items():
+ assert offset.is_on_offset(dt) == expected
+
+ apply_cases = [
+ (
+ CustomBusinessHour(holidays=holidays),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 1, 19): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 9),
+ datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 3, 9, 30, 15),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 10),
+ # out of business hours
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
+ # saturday
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
+ datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30),
+ },
+ ),
+ (
+ CustomBusinessHour(4, holidays=holidays),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 3, 9),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 3, 11),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 12),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
+ datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("apply_case", apply_cases)
+ def test_apply(self, apply_case):
+ offset, cases = apply_case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ nano_cases = [
+ (
+ CustomBusinessHour(holidays=holidays),
+ {
+ Timestamp("2014-07-01 15:00")
+ + Nano(5): Timestamp("2014-07-01 16:00")
+ + Nano(5),
+ Timestamp("2014-07-01 16:00")
+ + Nano(5): Timestamp("2014-07-03 09:00")
+ + Nano(5),
+ Timestamp("2014-07-01 16:00")
+ - Nano(5): Timestamp("2014-07-01 17:00")
+ - Nano(5),
+ },
+ ),
+ (
+ CustomBusinessHour(-1, holidays=holidays),
+ {
+ Timestamp("2014-07-01 15:00")
+ + Nano(5): Timestamp("2014-07-01 14:00")
+ + Nano(5),
+ Timestamp("2014-07-01 10:00")
+ + Nano(5): Timestamp("2014-07-01 09:00")
+ + Nano(5),
+ Timestamp("2014-07-01 10:00")
+ - Nano(5): Timestamp("2014-06-26 17:00")
+ - Nano(5),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("nano_case", nano_cases)
+ def test_apply_nanoseconds(self, nano_case):
+ offset, cases = nano_case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
diff --git a/pandas/tests/tseries/offsets/test_dst.py b/pandas/tests/tseries/offsets/test_dst.py
new file mode 100644
index 0000000000000..0ae94b6b57640
--- /dev/null
+++ b/pandas/tests/tseries/offsets/test_dst.py
@@ -0,0 +1,175 @@
+"""
+Tests for DateOffset additions over Daylight Savings Time
+"""
+from datetime import timedelta
+
+import pytest
+
+from pandas._libs.tslibs import Timestamp
+from pandas._libs.tslibs.offsets import (
+ BMonthBegin,
+ BMonthEnd,
+ BQuarterBegin,
+ BQuarterEnd,
+ BYearBegin,
+ BYearEnd,
+ CBMonthBegin,
+ CBMonthEnd,
+ DateOffset,
+ Day,
+ MonthBegin,
+ MonthEnd,
+ QuarterBegin,
+ QuarterEnd,
+ SemiMonthBegin,
+ SemiMonthEnd,
+ Week,
+ YearBegin,
+ YearEnd,
+)
+
+from pandas.tests.tseries.offsets.test_offsets import get_utc_offset_hours
+
+
+class TestDST:
+
+ # one microsecond before the DST transition
+ ts_pre_fallback = "2013-11-03 01:59:59.999999"
+ ts_pre_springfwd = "2013-03-10 01:59:59.999999"
+
+ # test both basic names and dateutil timezones
+ timezone_utc_offsets = {
+ "US/Eastern": {"utc_offset_daylight": -4, "utc_offset_standard": -5},
+ "dateutil/US/Pacific": {"utc_offset_daylight": -7, "utc_offset_standard": -8},
+ }
+ valid_date_offsets_singular = [
+ "weekday",
+ "day",
+ "hour",
+ "minute",
+ "second",
+ "microsecond",
+ ]
+ valid_date_offsets_plural = [
+ "weeks",
+ "days",
+ "hours",
+ "minutes",
+ "seconds",
+ "milliseconds",
+ "microseconds",
+ ]
+
+ def _test_all_offsets(self, n, **kwds):
+ valid_offsets = (
+ self.valid_date_offsets_plural
+ if n > 1
+ else self.valid_date_offsets_singular
+ )
+
+ for name in valid_offsets:
+ self._test_offset(offset_name=name, offset_n=n, **kwds)
+
+ def _test_offset(self, offset_name, offset_n, tstart, expected_utc_offset):
+ offset = DateOffset(**{offset_name: offset_n})
+
+ t = tstart + offset
+ if expected_utc_offset is not None:
+ assert get_utc_offset_hours(t) == expected_utc_offset
+
+ if offset_name == "weeks":
+ # dates should match
+ assert t.date() == timedelta(days=7 * offset.kwds["weeks"]) + tstart.date()
+ # expect the same day of week, hour of day, minute, second, ...
+ assert (
+ t.dayofweek == tstart.dayofweek
+ and t.hour == tstart.hour
+ and t.minute == tstart.minute
+ and t.second == tstart.second
+ )
+ elif offset_name == "days":
+ # dates should match
+ assert timedelta(offset.kwds["days"]) + tstart.date() == t.date()
+ # expect the same hour of day, minute, second, ...
+ assert (
+ t.hour == tstart.hour
+ and t.minute == tstart.minute
+ and t.second == tstart.second
+ )
+ elif offset_name in self.valid_date_offsets_singular:
+ # expect the singular offset value to match between tstart and t
+ datepart_offset = getattr(
+ t, offset_name if offset_name != "weekday" else "dayofweek"
+ )
+ assert datepart_offset == offset.kwds[offset_name]
+ else:
+ # the offset should be the same as if it was done in UTC
+ assert t == (tstart.tz_convert("UTC") + offset).tz_convert("US/Pacific")
+
+ def _make_timestamp(self, string, hrs_offset, tz):
+ if hrs_offset >= 0:
+ offset_string = f"{hrs_offset:02d}00"
+ else:
+ offset_string = f"-{(hrs_offset * -1):02}00"
+ return Timestamp(string + offset_string).tz_convert(tz)
+
+ def test_springforward_plural(self):
+ # test moving from standard to daylight savings
+ for tz, utc_offsets in self.timezone_utc_offsets.items():
+ hrs_pre = utc_offsets["utc_offset_standard"]
+ hrs_post = utc_offsets["utc_offset_daylight"]
+ self._test_all_offsets(
+ n=3,
+ tstart=self._make_timestamp(self.ts_pre_springfwd, hrs_pre, tz),
+ expected_utc_offset=hrs_post,
+ )
+
+ def test_fallback_singular(self):
+ # in the case of singular offsets, we don't necessarily know which utc
+ # offset the new Timestamp will wind up in (the tz for 1 month may be
+ # different from 1 second) so we don't specify an expected_utc_offset
+ for tz, utc_offsets in self.timezone_utc_offsets.items():
+ hrs_pre = utc_offsets["utc_offset_standard"]
+ self._test_all_offsets(
+ n=1,
+ tstart=self._make_timestamp(self.ts_pre_fallback, hrs_pre, tz),
+ expected_utc_offset=None,
+ )
+
+ def test_springforward_singular(self):
+ for tz, utc_offsets in self.timezone_utc_offsets.items():
+ hrs_pre = utc_offsets["utc_offset_standard"]
+ self._test_all_offsets(
+ n=1,
+ tstart=self._make_timestamp(self.ts_pre_springfwd, hrs_pre, tz),
+ expected_utc_offset=None,
+ )
+
+ offset_classes = {
+ MonthBegin: ["11/2/2012", "12/1/2012"],
+ MonthEnd: ["11/2/2012", "11/30/2012"],
+ BMonthBegin: ["11/2/2012", "12/3/2012"],
+ BMonthEnd: ["11/2/2012", "11/30/2012"],
+ CBMonthBegin: ["11/2/2012", "12/3/2012"],
+ CBMonthEnd: ["11/2/2012", "11/30/2012"],
+ SemiMonthBegin: ["11/2/2012", "11/15/2012"],
+ SemiMonthEnd: ["11/2/2012", "11/15/2012"],
+ Week: ["11/2/2012", "11/9/2012"],
+ YearBegin: ["11/2/2012", "1/1/2013"],
+ YearEnd: ["11/2/2012", "12/31/2012"],
+ BYearBegin: ["11/2/2012", "1/1/2013"],
+ BYearEnd: ["11/2/2012", "12/31/2012"],
+ QuarterBegin: ["11/2/2012", "12/1/2012"],
+ QuarterEnd: ["11/2/2012", "12/31/2012"],
+ BQuarterBegin: ["11/2/2012", "12/3/2012"],
+ BQuarterEnd: ["11/2/2012", "12/31/2012"],
+ Day: ["11/4/2012", "11/4/2012 23:00"],
+ }.items()
+
+ @pytest.mark.parametrize("tup", offset_classes)
+ def test_all_offset_classes(self, tup):
+ offset, test_values = tup
+
+ first = Timestamp(test_values[0], tz="US/Eastern") + offset()
+ second = Timestamp(test_values[1], tz="US/Eastern")
+ assert first == second
diff --git a/pandas/tests/tseries/offsets/test_fiscal.py b/pandas/tests/tseries/offsets/test_fiscal.py
index 7713be67a7e05..14728314b8e20 100644
--- a/pandas/tests/tseries/offsets/test_fiscal.py
+++ b/pandas/tests/tseries/offsets/test_fiscal.py
@@ -10,13 +10,16 @@
from pandas import Timestamp
import pandas._testing as tm
+from pandas.tests.tseries.offsets.common import (
+ Base,
+ WeekDay,
+ assert_is_on_offset,
+ assert_offset_equal,
+)
from pandas.tseries.frequencies import get_offset
from pandas.tseries.offsets import FY5253, FY5253Quarter
-from .common import assert_is_on_offset, assert_offset_equal
-from .test_offsets import Base, WeekDay
-
def makeFY5253LastOfMonthQuarter(*args, **kwds):
return FY5253Quarter(*args, variation="last", **kwds)
diff --git a/pandas/tests/tseries/offsets/test_month.py b/pandas/tests/tseries/offsets/test_month.py
new file mode 100644
index 0000000000000..578af79084e09
--- /dev/null
+++ b/pandas/tests/tseries/offsets/test_month.py
@@ -0,0 +1,838 @@
+"""
+Tests for CBMonthEnd CBMonthBegin, SemiMonthEnd, and SemiMonthBegin in offsets
+"""
+from datetime import date, datetime
+
+import numpy as np
+import pytest
+
+from pandas._libs.tslibs import Timestamp
+from pandas._libs.tslibs.offsets import (
+ CBMonthBegin,
+ CBMonthEnd,
+ CDay,
+ SemiMonthBegin,
+ SemiMonthEnd,
+)
+
+from pandas import DatetimeIndex, Series, _testing as tm, date_range
+from pandas.tests.tseries.offsets.common import (
+ Base,
+ assert_is_on_offset,
+ assert_offset_equal,
+)
+from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
+
+from pandas.tseries import offsets as offsets
+from pandas.tseries.holiday import USFederalHolidayCalendar
+
+
+class CustomBusinessMonthBase:
+ def setup_method(self, method):
+ self.d = datetime(2008, 1, 1)
+
+ self.offset = self._offset()
+ self.offset1 = self.offset
+ self.offset2 = self._offset(2)
+
+ def test_eq(self):
+ assert self.offset2 == self.offset2
+
+ def test_mul(self):
+ pass
+
+ def test_hash(self):
+ assert hash(self.offset2) == hash(self.offset2)
+
+ def test_roundtrip_pickle(self):
+ def _check_roundtrip(obj):
+ unpickled = tm.round_trip_pickle(obj)
+ assert unpickled == obj
+
+ _check_roundtrip(self._offset())
+ _check_roundtrip(self._offset(2))
+ _check_roundtrip(self._offset() * 2)
+
+ def test_copy(self):
+ # GH 17452
+ off = self._offset(weekmask="Mon Wed Fri")
+ assert off == off.copy()
+
+
+class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
+ _offset = CBMonthEnd
+
+ def test_different_normalize_equals(self):
+ # GH#21404 changed __eq__ to return False when `normalize` does not match
+ offset = self._offset()
+ offset2 = self._offset(normalize=True)
+ assert offset != offset2
+
+ def test_repr(self):
+ assert repr(self.offset) == "<CustomBusinessMonthEnd>"
+ assert repr(self.offset2) == "<2 * CustomBusinessMonthEnds>"
+
+ def test_call(self):
+ with tm.assert_produces_warning(FutureWarning):
+ # GH#34171 DateOffset.__call__ is deprecated
+ assert self.offset2(self.d) == datetime(2008, 2, 29)
+
+ def testRollback1(self):
+ assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
+
+ def testRollback2(self):
+ assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
+
+ def testRollforward1(self):
+ assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
+
+ def test_roll_date_object(self):
+ offset = CBMonthEnd()
+
+ dt = date(2012, 9, 15)
+
+ result = offset.rollback(dt)
+ assert result == datetime(2012, 8, 31)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2012, 9, 28)
+
+ offset = offsets.Day()
+ result = offset.rollback(dt)
+ assert result == datetime(2012, 9, 15)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2012, 9, 15)
+
+ on_offset_cases = [
+ (CBMonthEnd(), datetime(2008, 1, 31), True),
+ (CBMonthEnd(), datetime(2008, 1, 1), False),
+ ]
+
+ @pytest.mark.parametrize("case", on_offset_cases)
+ def test_is_on_offset(self, case):
+ offset, d, expected = case
+ assert_is_on_offset(offset, d, expected)
+
+ apply_cases: _ApplyCases = [
+ (
+ CBMonthEnd(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 31),
+ datetime(2008, 2, 7): datetime(2008, 2, 29),
+ },
+ ),
+ (
+ 2 * CBMonthEnd(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 2, 29),
+ datetime(2008, 2, 7): datetime(2008, 3, 31),
+ },
+ ),
+ (
+ -CBMonthEnd(),
+ {
+ datetime(2008, 1, 1): datetime(2007, 12, 31),
+ datetime(2008, 2, 8): datetime(2008, 1, 31),
+ },
+ ),
+ (
+ -2 * CBMonthEnd(),
+ {
+ datetime(2008, 1, 1): datetime(2007, 11, 30),
+ datetime(2008, 2, 9): datetime(2007, 12, 31),
+ },
+ ),
+ (
+ CBMonthEnd(0),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 31),
+ datetime(2008, 2, 7): datetime(2008, 2, 29),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("case", apply_cases)
+ def test_apply(self, case):
+ offset, cases = case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ def test_apply_large_n(self):
+ dt = datetime(2012, 10, 23)
+
+ result = dt + CBMonthEnd(10)
+ assert result == datetime(2013, 7, 31)
+
+ result = dt + CDay(100) - CDay(100)
+ assert result == dt
+
+ off = CBMonthEnd() * 6
+ rs = datetime(2012, 1, 1) - off
+ xp = datetime(2011, 7, 29)
+ assert rs == xp
+
+ st = datetime(2011, 12, 18)
+ rs = st + off
+ xp = datetime(2012, 5, 31)
+ assert rs == xp
+
+ def test_holidays(self):
+ # Define a TradingDay offset
+ holidays = ["2012-01-31", datetime(2012, 2, 28), np.datetime64("2012-02-29")]
+ bm_offset = CBMonthEnd(holidays=holidays)
+ dt = datetime(2012, 1, 1)
+ assert dt + bm_offset == datetime(2012, 1, 30)
+ assert dt + 2 * bm_offset == datetime(2012, 2, 27)
+
+ @pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
+ def test_datetimeindex(self):
+ from pandas.tseries.holiday import USFederalHolidayCalendar
+
+ hcal = USFederalHolidayCalendar()
+ freq = CBMonthEnd(calendar=hcal)
+
+ assert date_range(start="20120101", end="20130101", freq=freq).tolist()[
+ 0
+ ] == datetime(2012, 1, 31)
+
+
+class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
+ _offset = CBMonthBegin
+
+ def test_different_normalize_equals(self):
+ # GH#21404 changed __eq__ to return False when `normalize` does not match
+ offset = self._offset()
+ offset2 = self._offset(normalize=True)
+ assert offset != offset2
+
+ def test_repr(self):
+ assert repr(self.offset) == "<CustomBusinessMonthBegin>"
+ assert repr(self.offset2) == "<2 * CustomBusinessMonthBegins>"
+
+ def test_call(self):
+ with tm.assert_produces_warning(FutureWarning):
+ # GH#34171 DateOffset.__call__ is deprecated
+ assert self.offset2(self.d) == datetime(2008, 3, 3)
+
+ def testRollback1(self):
+ assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
+
+ def testRollback2(self):
+ assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)
+
+ def testRollforward1(self):
+ assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)
+
+ def test_roll_date_object(self):
+ offset = CBMonthBegin()
+
+ dt = date(2012, 9, 15)
+
+ result = offset.rollback(dt)
+ assert result == datetime(2012, 9, 3)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2012, 10, 1)
+
+ offset = offsets.Day()
+ result = offset.rollback(dt)
+ assert result == datetime(2012, 9, 15)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2012, 9, 15)
+
+ on_offset_cases = [
+ (CBMonthBegin(), datetime(2008, 1, 1), True),
+ (CBMonthBegin(), datetime(2008, 1, 31), False),
+ ]
+
+ @pytest.mark.parametrize("case", on_offset_cases)
+ def test_is_on_offset(self, case):
+ offset, dt, expected = case
+ assert_is_on_offset(offset, dt, expected)
+
+ apply_cases: _ApplyCases = [
+ (
+ CBMonthBegin(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 2, 1),
+ datetime(2008, 2, 7): datetime(2008, 3, 3),
+ },
+ ),
+ (
+ 2 * CBMonthBegin(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 3, 3),
+ datetime(2008, 2, 7): datetime(2008, 4, 1),
+ },
+ ),
+ (
+ -CBMonthBegin(),
+ {
+ datetime(2008, 1, 1): datetime(2007, 12, 3),
+ datetime(2008, 2, 8): datetime(2008, 2, 1),
+ },
+ ),
+ (
+ -2 * CBMonthBegin(),
+ {
+ datetime(2008, 1, 1): datetime(2007, 11, 1),
+ datetime(2008, 2, 9): datetime(2008, 1, 1),
+ },
+ ),
+ (
+ CBMonthBegin(0),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 1, 7): datetime(2008, 2, 1),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("case", apply_cases)
+ def test_apply(self, case):
+ offset, cases = case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ def test_apply_large_n(self):
+ dt = datetime(2012, 10, 23)
+
+ result = dt + CBMonthBegin(10)
+ assert result == datetime(2013, 8, 1)
+
+ result = dt + CDay(100) - CDay(100)
+ assert result == dt
+
+ off = CBMonthBegin() * 6
+ rs = datetime(2012, 1, 1) - off
+ xp = datetime(2011, 7, 1)
+ assert rs == xp
+
+ st = datetime(2011, 12, 18)
+ rs = st + off
+
+ xp = datetime(2012, 6, 1)
+ assert rs == xp
+
+ def test_holidays(self):
+ # Define a TradingDay offset
+ holidays = ["2012-02-01", datetime(2012, 2, 2), np.datetime64("2012-03-01")]
+ bm_offset = CBMonthBegin(holidays=holidays)
+ dt = datetime(2012, 1, 1)
+
+ assert dt + bm_offset == datetime(2012, 1, 2)
+ assert dt + 2 * bm_offset == datetime(2012, 2, 3)
+
+ @pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
+ def test_datetimeindex(self):
+ hcal = USFederalHolidayCalendar()
+ cbmb = CBMonthBegin(calendar=hcal)
+ assert date_range(start="20120101", end="20130101", freq=cbmb).tolist()[
+ 0
+ ] == datetime(2012, 1, 3)
+
+
+class TestSemiMonthEnd(Base):
+ _offset = SemiMonthEnd
+ offset1 = _offset()
+ offset2 = _offset(2)
+
+ def test_offset_whole_year(self):
+ dates = (
+ datetime(2007, 12, 31),
+ datetime(2008, 1, 15),
+ datetime(2008, 1, 31),
+ datetime(2008, 2, 15),
+ datetime(2008, 2, 29),
+ datetime(2008, 3, 15),
+ datetime(2008, 3, 31),
+ datetime(2008, 4, 15),
+ datetime(2008, 4, 30),
+ datetime(2008, 5, 15),
+ datetime(2008, 5, 31),
+ datetime(2008, 6, 15),
+ datetime(2008, 6, 30),
+ datetime(2008, 7, 15),
+ datetime(2008, 7, 31),
+ datetime(2008, 8, 15),
+ datetime(2008, 8, 31),
+ datetime(2008, 9, 15),
+ datetime(2008, 9, 30),
+ datetime(2008, 10, 15),
+ datetime(2008, 10, 31),
+ datetime(2008, 11, 15),
+ datetime(2008, 11, 30),
+ datetime(2008, 12, 15),
+ datetime(2008, 12, 31),
+ )
+
+ for base, exp_date in zip(dates[:-1], dates[1:]):
+ assert_offset_equal(SemiMonthEnd(), base, exp_date)
+
+ # ensure .apply_index works as expected
+ s = DatetimeIndex(dates[:-1])
+ with tm.assert_produces_warning(None):
+ # GH#22535 check that we don't get a FutureWarning from adding
+ # an integer array to PeriodIndex
+ result = SemiMonthEnd() + s
+
+ exp = DatetimeIndex(dates[1:])
+ tm.assert_index_equal(result, exp)
+
+ # ensure generating a range with DatetimeIndex gives same result
+ result = date_range(start=dates[0], end=dates[-1], freq="SM")
+ exp = DatetimeIndex(dates, freq="SM")
+ tm.assert_index_equal(result, exp)
+
+ offset_cases = []
+ offset_cases.append(
+ (
+ SemiMonthEnd(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 15),
+ datetime(2008, 1, 15): datetime(2008, 1, 31),
+ datetime(2008, 1, 31): datetime(2008, 2, 15),
+ datetime(2006, 12, 14): datetime(2006, 12, 15),
+ datetime(2006, 12, 29): datetime(2006, 12, 31),
+ datetime(2006, 12, 31): datetime(2007, 1, 15),
+ datetime(2007, 1, 1): datetime(2007, 1, 15),
+ datetime(2006, 12, 1): datetime(2006, 12, 15),
+ datetime(2006, 12, 15): datetime(2006, 12, 31),
+ },
+ )
+ )
+
+ offset_cases.append(
+ (
+ SemiMonthEnd(day_of_month=20),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 20),
+ datetime(2008, 1, 15): datetime(2008, 1, 20),
+ datetime(2008, 1, 21): datetime(2008, 1, 31),
+ datetime(2008, 1, 31): datetime(2008, 2, 20),
+ datetime(2006, 12, 14): datetime(2006, 12, 20),
+ datetime(2006, 12, 29): datetime(2006, 12, 31),
+ datetime(2006, 12, 31): datetime(2007, 1, 20),
+ datetime(2007, 1, 1): datetime(2007, 1, 20),
+ datetime(2006, 12, 1): datetime(2006, 12, 20),
+ datetime(2006, 12, 15): datetime(2006, 12, 20),
+ },
+ )
+ )
+
+ offset_cases.append(
+ (
+ SemiMonthEnd(0),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 15),
+ datetime(2008, 1, 16): datetime(2008, 1, 31),
+ datetime(2008, 1, 15): datetime(2008, 1, 15),
+ datetime(2008, 1, 31): datetime(2008, 1, 31),
+ datetime(2006, 12, 29): datetime(2006, 12, 31),
+ datetime(2006, 12, 31): datetime(2006, 12, 31),
+ datetime(2007, 1, 1): datetime(2007, 1, 15),
+ },
+ )
+ )
+
+ offset_cases.append(
+ (
+ SemiMonthEnd(0, day_of_month=16),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 16),
+ datetime(2008, 1, 16): datetime(2008, 1, 16),
+ datetime(2008, 1, 15): datetime(2008, 1, 16),
+ datetime(2008, 1, 31): datetime(2008, 1, 31),
+ datetime(2006, 12, 29): datetime(2006, 12, 31),
+ datetime(2006, 12, 31): datetime(2006, 12, 31),
+ datetime(2007, 1, 1): datetime(2007, 1, 16),
+ },
+ )
+ )
+
+ offset_cases.append(
+ (
+ SemiMonthEnd(2),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 31),
+ datetime(2008, 1, 31): datetime(2008, 2, 29),
+ datetime(2006, 12, 29): datetime(2007, 1, 15),
+ datetime(2006, 12, 31): datetime(2007, 1, 31),
+ datetime(2007, 1, 1): datetime(2007, 1, 31),
+ datetime(2007, 1, 16): datetime(2007, 2, 15),
+ datetime(2006, 11, 1): datetime(2006, 11, 30),
+ },
+ )
+ )
+
+ offset_cases.append(
+ (
+ SemiMonthEnd(-1),
+ {
+ datetime(2007, 1, 1): datetime(2006, 12, 31),
+ datetime(2008, 6, 30): datetime(2008, 6, 15),
+ datetime(2008, 12, 31): datetime(2008, 12, 15),
+ datetime(2006, 12, 29): datetime(2006, 12, 15),
+ datetime(2006, 12, 30): datetime(2006, 12, 15),
+ datetime(2007, 1, 1): datetime(2006, 12, 31),
+ },
+ )
+ )
+
+ offset_cases.append(
+ (
+ SemiMonthEnd(-1, day_of_month=4),
+ {
+ datetime(2007, 1, 1): datetime(2006, 12, 31),
+ datetime(2007, 1, 4): datetime(2006, 12, 31),
+ datetime(2008, 6, 30): datetime(2008, 6, 4),
+ datetime(2008, 12, 31): datetime(2008, 12, 4),
+ datetime(2006, 12, 5): datetime(2006, 12, 4),
+ datetime(2006, 12, 30): datetime(2006, 12, 4),
+ datetime(2007, 1, 1): datetime(2006, 12, 31),
+ },
+ )
+ )
+
+ offset_cases.append(
+ (
+ SemiMonthEnd(-2),
+ {
+ datetime(2007, 1, 1): datetime(2006, 12, 15),
+ datetime(2008, 6, 30): datetime(2008, 5, 31),
+ datetime(2008, 3, 15): datetime(2008, 2, 15),
+ datetime(2008, 12, 31): datetime(2008, 11, 30),
+ datetime(2006, 12, 29): datetime(2006, 11, 30),
+ datetime(2006, 12, 14): datetime(2006, 11, 15),
+ datetime(2007, 1, 1): datetime(2006, 12, 15),
+ },
+ )
+ )
+
+ @pytest.mark.parametrize("case", offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ @pytest.mark.parametrize("case", offset_cases)
+ def test_apply_index(self, case):
+ # https://github.com/pandas-dev/pandas/issues/34580
+ offset, cases = case
+ s = DatetimeIndex(cases.keys())
+ exp = DatetimeIndex(cases.values())
+
+ with tm.assert_produces_warning(None):
+ # GH#22535 check that we don't get a FutureWarning from adding
+ # an integer array to PeriodIndex
+ result = offset + s
+ tm.assert_index_equal(result, exp)
+
+ with tm.assert_produces_warning(FutureWarning):
+ result = offset.apply_index(s)
+ tm.assert_index_equal(result, exp)
+
+ on_offset_cases = [
+ (datetime(2007, 12, 31), True),
+ (datetime(2007, 12, 15), True),
+ (datetime(2007, 12, 14), False),
+ (datetime(2007, 12, 1), False),
+ (datetime(2008, 2, 29), True),
+ ]
+
+ @pytest.mark.parametrize("case", on_offset_cases)
+ def test_is_on_offset(self, case):
+ dt, expected = case
+ assert_is_on_offset(SemiMonthEnd(), dt, expected)
+
+ @pytest.mark.parametrize("klass", [Series, DatetimeIndex])
+ def test_vectorized_offset_addition(self, klass):
+ s = klass(
+ [
+ Timestamp("2000-01-15 00:15:00", tz="US/Central"),
+ Timestamp("2000-02-15", tz="US/Central"),
+ ],
+ name="a",
+ )
+
+ with tm.assert_produces_warning(None):
+ # GH#22535 check that we don't get a FutureWarning from adding
+ # an integer array to PeriodIndex
+ result = s + SemiMonthEnd()
+ result2 = SemiMonthEnd() + s
+
+ exp = klass(
+ [
+ Timestamp("2000-01-31 00:15:00", tz="US/Central"),
+ Timestamp("2000-02-29", tz="US/Central"),
+ ],
+ name="a",
+ )
+ tm.assert_equal(result, exp)
+ tm.assert_equal(result2, exp)
+
+ s = klass(
+ [
+ Timestamp("2000-01-01 00:15:00", tz="US/Central"),
+ Timestamp("2000-02-01", tz="US/Central"),
+ ],
+ name="a",
+ )
+
+ with tm.assert_produces_warning(None):
+ # GH#22535 check that we don't get a FutureWarning from adding
+ # an integer array to PeriodIndex
+ result = s + SemiMonthEnd()
+ result2 = SemiMonthEnd() + s
+
+ exp = klass(
+ [
+ Timestamp("2000-01-15 00:15:00", tz="US/Central"),
+ Timestamp("2000-02-15", tz="US/Central"),
+ ],
+ name="a",
+ )
+ tm.assert_equal(result, exp)
+ tm.assert_equal(result2, exp)
+
+
+class TestSemiMonthBegin(Base):
+ _offset = SemiMonthBegin
+ offset1 = _offset()
+ offset2 = _offset(2)
+
+ def test_offset_whole_year(self):
+ dates = (
+ datetime(2007, 12, 15),
+ datetime(2008, 1, 1),
+ datetime(2008, 1, 15),
+ datetime(2008, 2, 1),
+ datetime(2008, 2, 15),
+ datetime(2008, 3, 1),
+ datetime(2008, 3, 15),
+ datetime(2008, 4, 1),
+ datetime(2008, 4, 15),
+ datetime(2008, 5, 1),
+ datetime(2008, 5, 15),
+ datetime(2008, 6, 1),
+ datetime(2008, 6, 15),
+ datetime(2008, 7, 1),
+ datetime(2008, 7, 15),
+ datetime(2008, 8, 1),
+ datetime(2008, 8, 15),
+ datetime(2008, 9, 1),
+ datetime(2008, 9, 15),
+ datetime(2008, 10, 1),
+ datetime(2008, 10, 15),
+ datetime(2008, 11, 1),
+ datetime(2008, 11, 15),
+ datetime(2008, 12, 1),
+ datetime(2008, 12, 15),
+ )
+
+ for base, exp_date in zip(dates[:-1], dates[1:]):
+ assert_offset_equal(SemiMonthBegin(), base, exp_date)
+
+ # ensure .apply_index works as expected
+ s = DatetimeIndex(dates[:-1])
+ with tm.assert_produces_warning(None):
+ # GH#22535 check that we don't get a FutureWarning from adding
+ # an integer array to PeriodIndex
+ result = SemiMonthBegin() + s
+
+ exp = DatetimeIndex(dates[1:])
+ tm.assert_index_equal(result, exp)
+
+ # ensure generating a range with DatetimeIndex gives same result
+ result = date_range(start=dates[0], end=dates[-1], freq="SMS")
+ exp = DatetimeIndex(dates, freq="SMS")
+ tm.assert_index_equal(result, exp)
+
+ offset_cases = [
+ (
+ SemiMonthBegin(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 15),
+ datetime(2008, 1, 15): datetime(2008, 2, 1),
+ datetime(2008, 1, 31): datetime(2008, 2, 1),
+ datetime(2006, 12, 14): datetime(2006, 12, 15),
+ datetime(2006, 12, 29): datetime(2007, 1, 1),
+ datetime(2006, 12, 31): datetime(2007, 1, 1),
+ datetime(2007, 1, 1): datetime(2007, 1, 15),
+ datetime(2006, 12, 1): datetime(2006, 12, 15),
+ datetime(2006, 12, 15): datetime(2007, 1, 1),
+ },
+ ),
+ (
+ SemiMonthBegin(day_of_month=20),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 20),
+ datetime(2008, 1, 15): datetime(2008, 1, 20),
+ datetime(2008, 1, 21): datetime(2008, 2, 1),
+ datetime(2008, 1, 31): datetime(2008, 2, 1),
+ datetime(2006, 12, 14): datetime(2006, 12, 20),
+ datetime(2006, 12, 29): datetime(2007, 1, 1),
+ datetime(2006, 12, 31): datetime(2007, 1, 1),
+ datetime(2007, 1, 1): datetime(2007, 1, 20),
+ datetime(2006, 12, 1): datetime(2006, 12, 20),
+ datetime(2006, 12, 15): datetime(2006, 12, 20),
+ },
+ ),
+ (
+ SemiMonthBegin(0),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 1, 16): datetime(2008, 2, 1),
+ datetime(2008, 1, 15): datetime(2008, 1, 15),
+ datetime(2008, 1, 31): datetime(2008, 2, 1),
+ datetime(2006, 12, 29): datetime(2007, 1, 1),
+ datetime(2006, 12, 2): datetime(2006, 12, 15),
+ datetime(2007, 1, 1): datetime(2007, 1, 1),
+ },
+ ),
+ (
+ SemiMonthBegin(0, day_of_month=16),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 1, 16): datetime(2008, 1, 16),
+ datetime(2008, 1, 15): datetime(2008, 1, 16),
+ datetime(2008, 1, 31): datetime(2008, 2, 1),
+ datetime(2006, 12, 29): datetime(2007, 1, 1),
+ datetime(2006, 12, 31): datetime(2007, 1, 1),
+ datetime(2007, 1, 5): datetime(2007, 1, 16),
+ datetime(2007, 1, 1): datetime(2007, 1, 1),
+ },
+ ),
+ (
+ SemiMonthBegin(2),
+ {
+ datetime(2008, 1, 1): datetime(2008, 2, 1),
+ datetime(2008, 1, 31): datetime(2008, 2, 15),
+ datetime(2006, 12, 1): datetime(2007, 1, 1),
+ datetime(2006, 12, 29): datetime(2007, 1, 15),
+ datetime(2006, 12, 15): datetime(2007, 1, 15),
+ datetime(2007, 1, 1): datetime(2007, 2, 1),
+ datetime(2007, 1, 16): datetime(2007, 2, 15),
+ datetime(2006, 11, 1): datetime(2006, 12, 1),
+ },
+ ),
+ (
+ SemiMonthBegin(-1),
+ {
+ datetime(2007, 1, 1): datetime(2006, 12, 15),
+ datetime(2008, 6, 30): datetime(2008, 6, 15),
+ datetime(2008, 6, 14): datetime(2008, 6, 1),
+ datetime(2008, 12, 31): datetime(2008, 12, 15),
+ datetime(2006, 12, 29): datetime(2006, 12, 15),
+ datetime(2006, 12, 15): datetime(2006, 12, 1),
+ datetime(2007, 1, 1): datetime(2006, 12, 15),
+ },
+ ),
+ (
+ SemiMonthBegin(-1, day_of_month=4),
+ {
+ datetime(2007, 1, 1): datetime(2006, 12, 4),
+ datetime(2007, 1, 4): datetime(2007, 1, 1),
+ datetime(2008, 6, 30): datetime(2008, 6, 4),
+ datetime(2008, 12, 31): datetime(2008, 12, 4),
+ datetime(2006, 12, 5): datetime(2006, 12, 4),
+ datetime(2006, 12, 30): datetime(2006, 12, 4),
+ datetime(2006, 12, 2): datetime(2006, 12, 1),
+ datetime(2007, 1, 1): datetime(2006, 12, 4),
+ },
+ ),
+ (
+ SemiMonthBegin(-2),
+ {
+ datetime(2007, 1, 1): datetime(2006, 12, 1),
+ datetime(2008, 6, 30): datetime(2008, 6, 1),
+ datetime(2008, 6, 14): datetime(2008, 5, 15),
+ datetime(2008, 12, 31): datetime(2008, 12, 1),
+ datetime(2006, 12, 29): datetime(2006, 12, 1),
+ datetime(2006, 12, 15): datetime(2006, 11, 15),
+ datetime(2007, 1, 1): datetime(2006, 12, 1),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("case", offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ @pytest.mark.parametrize("case", offset_cases)
+ def test_apply_index(self, case):
+ offset, cases = case
+ s = DatetimeIndex(cases.keys())
+
+ with tm.assert_produces_warning(None):
+ # GH#22535 check that we don't get a FutureWarning from adding
+ # an integer array to PeriodIndex
+ result = offset + s
+
+ exp = DatetimeIndex(cases.values())
+ tm.assert_index_equal(result, exp)
+
+ on_offset_cases = [
+ (datetime(2007, 12, 1), True),
+ (datetime(2007, 12, 15), True),
+ (datetime(2007, 12, 14), False),
+ (datetime(2007, 12, 31), False),
+ (datetime(2008, 2, 15), True),
+ ]
+
+ @pytest.mark.parametrize("case", on_offset_cases)
+ def test_is_on_offset(self, case):
+ dt, expected = case
+ assert_is_on_offset(SemiMonthBegin(), dt, expected)
+
+ @pytest.mark.parametrize("klass", [Series, DatetimeIndex])
+ def test_vectorized_offset_addition(self, klass):
+ s = klass(
+ [
+ Timestamp("2000-01-15 00:15:00", tz="US/Central"),
+ Timestamp("2000-02-15", tz="US/Central"),
+ ],
+ name="a",
+ )
+ with tm.assert_produces_warning(None):
+ # GH#22535 check that we don't get a FutureWarning from adding
+ # an integer array to PeriodIndex
+ result = s + SemiMonthBegin()
+ result2 = SemiMonthBegin() + s
+
+ exp = klass(
+ [
+ Timestamp("2000-02-01 00:15:00", tz="US/Central"),
+ Timestamp("2000-03-01", tz="US/Central"),
+ ],
+ name="a",
+ )
+ tm.assert_equal(result, exp)
+ tm.assert_equal(result2, exp)
+
+ s = klass(
+ [
+ Timestamp("2000-01-01 00:15:00", tz="US/Central"),
+ Timestamp("2000-02-01", tz="US/Central"),
+ ],
+ name="a",
+ )
+ with tm.assert_produces_warning(None):
+ # GH#22535 check that we don't get a FutureWarning from adding
+ # an integer array to PeriodIndex
+ result = s + SemiMonthBegin()
+ result2 = SemiMonthBegin() + s
+
+ exp = klass(
+ [
+ Timestamp("2000-01-15 00:15:00", tz="US/Central"),
+ Timestamp("2000-02-15", tz="US/Central"),
+ ],
+ name="a",
+ )
+ tm.assert_equal(result, exp)
+ tm.assert_equal(result2, exp)
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index 325a5311829dc..b65f8084e4bec 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -1,43 +1,30 @@
-from datetime import date, datetime, time as dt_time, timedelta
-from typing import Dict, List, Optional, Tuple, Type
+"""
+Tests of pandas.tseries.offsets
+"""
+from datetime import datetime, timedelta
+from typing import Dict, List, Tuple
-from dateutil.tz import tzlocal
import numpy as np
import pytest
-from pandas._libs.tslibs import (
- NaT,
- OutOfBoundsDatetime,
- Timestamp,
- conversion,
- timezones,
-)
+from pandas._libs.tslibs import NaT, Timestamp, conversion, timezones
import pandas._libs.tslibs.offsets as liboffsets
-from pandas._libs.tslibs.offsets import ApplyTypeError, _get_offset, _offset_map
+from pandas._libs.tslibs.offsets import _get_offset, _offset_map
from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG
-from pandas.compat import IS64
from pandas.compat.numpy import np_datetime64_compat
from pandas.errors import PerformanceWarning
-from pandas import DatetimeIndex, Series, Timedelta, date_range, read_pickle
+from pandas import DatetimeIndex
import pandas._testing as tm
+from pandas.tests.tseries.offsets.common import Base, WeekDay, assert_offset_equal
-from pandas.tseries.holiday import USFederalHolidayCalendar
import pandas.tseries.offsets as offsets
from pandas.tseries.offsets import (
FY5253,
BaseOffset,
BDay,
- BMonthBegin,
BMonthEnd,
- BQuarterBegin,
- BQuarterEnd,
BusinessHour,
- BYearBegin,
- BYearEnd,
- CBMonthBegin,
- CBMonthEnd,
- CDay,
CustomBusinessDay,
CustomBusinessHour,
CustomBusinessMonthBegin,
@@ -48,182 +35,15 @@
FY5253Quarter,
LastWeekOfMonth,
MonthBegin,
- MonthEnd,
Nano,
- QuarterBegin,
- QuarterEnd,
- SemiMonthBegin,
- SemiMonthEnd,
Tick,
Week,
WeekOfMonth,
- YearBegin,
- YearEnd,
)
-from .common import assert_is_on_offset, assert_offset_equal
-
-
-class WeekDay:
- # TODO: Remove: This is not used outside of tests
- MON = 0
- TUE = 1
- WED = 2
- THU = 3
- FRI = 4
- SAT = 5
- SUN = 6
-
-
-#####
-# DateOffset Tests
-#####
_ApplyCases = List[Tuple[BaseOffset, Dict[datetime, datetime]]]
-class Base:
- _offset: Optional[Type[DateOffset]] = None
- d = Timestamp(datetime(2008, 1, 2))
-
- timezones = [
- None,
- "UTC",
- "Asia/Tokyo",
- "US/Eastern",
- "dateutil/Asia/Tokyo",
- "dateutil/US/Pacific",
- ]
-
- def _get_offset(self, klass, value=1, normalize=False):
- # create instance from offset class
- if klass is FY5253:
- klass = klass(
- n=value,
- startingMonth=1,
- weekday=1,
- variation="last",
- normalize=normalize,
- )
- elif klass is FY5253Quarter:
- klass = klass(
- n=value,
- startingMonth=1,
- weekday=1,
- qtr_with_extra_week=1,
- variation="last",
- normalize=normalize,
- )
- elif klass is LastWeekOfMonth:
- klass = klass(n=value, weekday=5, normalize=normalize)
- elif klass is WeekOfMonth:
- klass = klass(n=value, week=1, weekday=5, normalize=normalize)
- elif klass is Week:
- klass = klass(n=value, weekday=5, normalize=normalize)
- elif klass is DateOffset:
- klass = klass(days=value, normalize=normalize)
- else:
- klass = klass(value, normalize=normalize)
- return klass
-
- def test_apply_out_of_range(self, tz_naive_fixture):
- tz = tz_naive_fixture
- if self._offset is None:
- return
- if isinstance(tz, tzlocal) and not IS64:
- pytest.xfail(reason="OverflowError inside tzlocal past 2038")
-
- # try to create an out-of-bounds result timestamp; if we can't create
- # the offset skip
- try:
- if self._offset in (BusinessHour, CustomBusinessHour):
- # Using 10000 in BusinessHour fails in tz check because of DST
- # difference
- offset = self._get_offset(self._offset, value=100000)
- else:
- offset = self._get_offset(self._offset, value=10000)
-
- result = Timestamp("20080101") + offset
- assert isinstance(result, datetime)
- assert result.tzinfo is None
-
- # Check tz is preserved
- t = Timestamp("20080101", tz=tz)
- result = t + offset
- assert isinstance(result, datetime)
- assert t.tzinfo == result.tzinfo
-
- except OutOfBoundsDatetime:
- pass
- except (ValueError, KeyError):
- # we are creating an invalid offset
- # so ignore
- pass
-
- def test_offsets_compare_equal(self):
- # root cause of GH#456: __ne__ was not implemented
- if self._offset is None:
- return
- offset1 = self._offset()
- offset2 = self._offset()
- assert not offset1 != offset2
- assert offset1 == offset2
-
- def test_rsub(self):
- if self._offset is None or not hasattr(self, "offset2"):
- # i.e. skip for TestCommon and YQM subclasses that do not have
- # offset2 attr
- return
- assert self.d - self.offset2 == (-self.offset2).apply(self.d)
-
- def test_radd(self):
- if self._offset is None or not hasattr(self, "offset2"):
- # i.e. skip for TestCommon and YQM subclasses that do not have
- # offset2 attr
- return
- assert self.d + self.offset2 == self.offset2 + self.d
-
- def test_sub(self):
- if self._offset is None or not hasattr(self, "offset2"):
- # i.e. skip for TestCommon and YQM subclasses that do not have
- # offset2 attr
- return
- off = self.offset2
- msg = "Cannot subtract datetime from offset"
- with pytest.raises(TypeError, match=msg):
- off - self.d
-
- assert 2 * off - off == off
- assert self.d - self.offset2 == self.d + self._offset(-2)
- assert self.d - self.offset2 == self.d - (2 * off - off)
-
- def testMult1(self):
- if self._offset is None or not hasattr(self, "offset1"):
- # i.e. skip for TestCommon and YQM subclasses that do not have
- # offset1 attr
- return
- assert self.d + 10 * self.offset1 == self.d + self._offset(10)
- assert self.d + 5 * self.offset1 == self.d + self._offset(5)
-
- def testMult2(self):
- if self._offset is None:
- return
- assert self.d + (-5 * self._offset(-10)) == self.d + self._offset(50)
- assert self.d + (-3 * self._offset(-2)) == self.d + self._offset(6)
-
- def test_compare_str(self):
- # GH#23524
- # comparing to strings that cannot be cast to DateOffsets should
- # not raise for __eq__ or __ne__
- if self._offset is None:
- return
- off = self._get_offset(self._offset)
-
- assert not off == "infer"
- assert off != "foo"
- # Note: inequalities are only implemented for Tick subclasses;
- # tests for this are in test_ticks
-
-
class TestCommon(Base):
# exected value created by Base._get_offset
# are applied to 2011/01/01 09:00 (Saturday)
@@ -724,3327 +544,6 @@ def test_eq(self):
assert offset1 != offset2
-class TestBusinessDay(Base):
- _offset = BDay
-
- def setup_method(self, method):
- self.d = datetime(2008, 1, 1)
-
- self.offset = BDay()
- self.offset1 = self.offset
- self.offset2 = BDay(2)
-
- def test_different_normalize_equals(self):
- # GH#21404 changed __eq__ to return False when `normalize` does not match
- offset = self._offset()
- offset2 = self._offset(normalize=True)
- assert offset != offset2
-
- def test_repr(self):
- assert repr(self.offset) == "<BusinessDay>"
- assert repr(self.offset2) == "<2 * BusinessDays>"
-
- expected = "<BusinessDay: offset=datetime.timedelta(days=1)>"
- assert repr(self.offset + timedelta(1)) == expected
-
- def test_with_offset(self):
- offset = self.offset + timedelta(hours=2)
-
- assert (self.d + offset) == datetime(2008, 1, 2, 2)
-
- def test_with_offset_index(self):
- dti = DatetimeIndex([self.d])
- result = dti + (self.offset + timedelta(hours=2))
-
- expected = DatetimeIndex([datetime(2008, 1, 2, 2)])
- tm.assert_index_equal(result, expected)
-
- def test_eq(self):
- assert self.offset2 == self.offset2
-
- def test_mul(self):
- pass
-
- def test_hash(self):
- assert hash(self.offset2) == hash(self.offset2)
-
- def test_call(self):
- with tm.assert_produces_warning(FutureWarning):
- # GH#34171 DateOffset.__call__ is deprecated
- assert self.offset2(self.d) == datetime(2008, 1, 3)
-
- def testRollback1(self):
- assert BDay(10).rollback(self.d) == self.d
-
- def testRollback2(self):
- assert BDay(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)
-
- def testRollforward1(self):
- assert BDay(10).rollforward(self.d) == self.d
-
- def testRollforward2(self):
- assert BDay(10).rollforward(datetime(2008, 1, 5)) == datetime(2008, 1, 7)
-
- def test_roll_date_object(self):
- offset = BDay()
-
- dt = date(2012, 9, 15)
-
- result = offset.rollback(dt)
- assert result == datetime(2012, 9, 14)
-
- result = offset.rollforward(dt)
- assert result == datetime(2012, 9, 17)
-
- offset = offsets.Day()
- result = offset.rollback(dt)
- assert result == datetime(2012, 9, 15)
-
- result = offset.rollforward(dt)
- assert result == datetime(2012, 9, 15)
-
- def test_is_on_offset(self):
- tests = [
- (BDay(), datetime(2008, 1, 1), True),
- (BDay(), datetime(2008, 1, 5), False),
- ]
-
- for offset, d, expected in tests:
- assert_is_on_offset(offset, d, expected)
-
- apply_cases: _ApplyCases = []
- apply_cases.append(
- (
- BDay(),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 2),
- datetime(2008, 1, 4): datetime(2008, 1, 7),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 8),
- },
- )
- )
-
- apply_cases.append(
- (
- 2 * BDay(),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 3),
- datetime(2008, 1, 4): datetime(2008, 1, 8),
- datetime(2008, 1, 5): datetime(2008, 1, 8),
- datetime(2008, 1, 6): datetime(2008, 1, 8),
- datetime(2008, 1, 7): datetime(2008, 1, 9),
- },
- )
- )
-
- apply_cases.append(
- (
- -BDay(),
- {
- datetime(2008, 1, 1): datetime(2007, 12, 31),
- datetime(2008, 1, 4): datetime(2008, 1, 3),
- datetime(2008, 1, 5): datetime(2008, 1, 4),
- datetime(2008, 1, 6): datetime(2008, 1, 4),
- datetime(2008, 1, 7): datetime(2008, 1, 4),
- datetime(2008, 1, 8): datetime(2008, 1, 7),
- },
- )
- )
-
- apply_cases.append(
- (
- -2 * BDay(),
- {
- datetime(2008, 1, 1): datetime(2007, 12, 28),
- datetime(2008, 1, 4): datetime(2008, 1, 2),
- datetime(2008, 1, 5): datetime(2008, 1, 3),
- datetime(2008, 1, 6): datetime(2008, 1, 3),
- datetime(2008, 1, 7): datetime(2008, 1, 3),
- datetime(2008, 1, 8): datetime(2008, 1, 4),
- datetime(2008, 1, 9): datetime(2008, 1, 7),
- },
- )
- )
-
- apply_cases.append(
- (
- BDay(0),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 1, 4): datetime(2008, 1, 4),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 7),
- },
- )
- )
-
- @pytest.mark.parametrize("case", apply_cases)
- def test_apply(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- def test_apply_large_n(self):
- dt = datetime(2012, 10, 23)
-
- result = dt + BDay(10)
- assert result == datetime(2012, 11, 6)
-
- result = dt + BDay(100) - BDay(100)
- assert result == dt
-
- off = BDay() * 6
- rs = datetime(2012, 1, 1) - off
- xp = datetime(2011, 12, 23)
- assert rs == xp
-
- st = datetime(2011, 12, 18)
- rs = st + off
- xp = datetime(2011, 12, 26)
- assert rs == xp
-
- off = BDay() * 10
- rs = datetime(2014, 1, 5) + off # see #5890
- xp = datetime(2014, 1, 17)
- assert rs == xp
-
- def test_apply_corner(self):
- msg = "Only know how to combine business day with datetime or timedelta"
- with pytest.raises(ApplyTypeError, match=msg):
- BDay().apply(BMonthEnd())
-
-
-class TestBusinessHour(Base):
- _offset = BusinessHour
-
- def setup_method(self, method):
- self.d = datetime(2014, 7, 1, 10, 00)
-
- self.offset1 = BusinessHour()
- self.offset2 = BusinessHour(n=3)
-
- self.offset3 = BusinessHour(n=-1)
- self.offset4 = BusinessHour(n=-4)
-
- from datetime import time as dt_time
-
- self.offset5 = BusinessHour(start=dt_time(11, 0), end=dt_time(14, 30))
- self.offset6 = BusinessHour(start="20:00", end="05:00")
- self.offset7 = BusinessHour(n=-2, start=dt_time(21, 30), end=dt_time(6, 30))
- self.offset8 = BusinessHour(start=["09:00", "13:00"], end=["12:00", "17:00"])
- self.offset9 = BusinessHour(
- n=3, start=["09:00", "22:00"], end=["13:00", "03:00"]
- )
- self.offset10 = BusinessHour(
- n=-1, start=["23:00", "13:00"], end=["02:00", "17:00"]
- )
-
- @pytest.mark.parametrize(
- "start,end,match",
- [
- (
- dt_time(11, 0, 5),
- "17:00",
- "time data must be specified only with hour and minute",
- ),
- ("AAA", "17:00", "time data must match '%H:%M' format"),
- ("14:00:05", "17:00", "time data must match '%H:%M' format"),
- ([], "17:00", "Must include at least 1 start time"),
- ("09:00", [], "Must include at least 1 end time"),
- (
- ["09:00", "11:00"],
- "17:00",
- "number of starting time and ending time must be the same",
- ),
- (
- ["09:00", "11:00"],
- ["10:00"],
- "number of starting time and ending time must be the same",
- ),
- (
- ["09:00", "11:00"],
- ["12:00", "20:00"],
- r"invalid starting and ending time\(s\): opening hours should not "
- "touch or overlap with one another",
- ),
- (
- ["12:00", "20:00"],
- ["09:00", "11:00"],
- r"invalid starting and ending time\(s\): opening hours should not "
- "touch or overlap with one another",
- ),
- ],
- )
- def test_constructor_errors(self, start, end, match):
- with pytest.raises(ValueError, match=match):
- BusinessHour(start=start, end=end)
-
- def test_different_normalize_equals(self):
- # GH#21404 changed __eq__ to return False when `normalize` does not match
- offset = self._offset()
- offset2 = self._offset(normalize=True)
- assert offset != offset2
-
- def test_repr(self):
- assert repr(self.offset1) == "<BusinessHour: BH=09:00-17:00>"
- assert repr(self.offset2) == "<3 * BusinessHours: BH=09:00-17:00>"
- assert repr(self.offset3) == "<-1 * BusinessHour: BH=09:00-17:00>"
- assert repr(self.offset4) == "<-4 * BusinessHours: BH=09:00-17:00>"
-
- assert repr(self.offset5) == "<BusinessHour: BH=11:00-14:30>"
- assert repr(self.offset6) == "<BusinessHour: BH=20:00-05:00>"
- assert repr(self.offset7) == "<-2 * BusinessHours: BH=21:30-06:30>"
- assert repr(self.offset8) == "<BusinessHour: BH=09:00-12:00,13:00-17:00>"
- assert repr(self.offset9) == "<3 * BusinessHours: BH=09:00-13:00,22:00-03:00>"
- assert repr(self.offset10) == "<-1 * BusinessHour: BH=13:00-17:00,23:00-02:00>"
-
- def test_with_offset(self):
- expected = Timestamp("2014-07-01 13:00")
-
- assert self.d + BusinessHour() * 3 == expected
- assert self.d + BusinessHour(n=3) == expected
-
- @pytest.mark.parametrize(
- "offset_name",
- ["offset1", "offset2", "offset3", "offset4", "offset8", "offset9", "offset10"],
- )
- def test_eq_attribute(self, offset_name):
- offset = getattr(self, offset_name)
- assert offset == offset
-
- @pytest.mark.parametrize(
- "offset1,offset2",
- [
- (BusinessHour(start="09:00"), BusinessHour()),
- (
- BusinessHour(start=["23:00", "13:00"], end=["12:00", "17:00"]),
- BusinessHour(start=["13:00", "23:00"], end=["17:00", "12:00"]),
- ),
- ],
- )
- def test_eq(self, offset1, offset2):
- assert offset1 == offset2
-
- @pytest.mark.parametrize(
- "offset1,offset2",
- [
- (BusinessHour(), BusinessHour(-1)),
- (BusinessHour(start="09:00"), BusinessHour(start="09:01")),
- (
- BusinessHour(start="09:00", end="17:00"),
- BusinessHour(start="17:00", end="09:01"),
- ),
- (
- BusinessHour(start=["13:00", "23:00"], end=["18:00", "07:00"]),
- BusinessHour(start=["13:00", "23:00"], end=["17:00", "12:00"]),
- ),
- ],
- )
- def test_neq(self, offset1, offset2):
- assert offset1 != offset2
-
- @pytest.mark.parametrize(
- "offset_name",
- ["offset1", "offset2", "offset3", "offset4", "offset8", "offset9", "offset10"],
- )
- def test_hash(self, offset_name):
- offset = getattr(self, offset_name)
- assert offset == offset
-
- def test_call(self):
- with tm.assert_produces_warning(FutureWarning):
- # GH#34171 DateOffset.__call__ is deprecated
- assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
- assert self.offset2(self.d) == datetime(2014, 7, 1, 13)
- assert self.offset3(self.d) == datetime(2014, 6, 30, 17)
- assert self.offset4(self.d) == datetime(2014, 6, 30, 14)
- assert self.offset8(self.d) == datetime(2014, 7, 1, 11)
- assert self.offset9(self.d) == datetime(2014, 7, 1, 22)
- assert self.offset10(self.d) == datetime(2014, 7, 1, 1)
-
- def test_sub(self):
- # we have to override test_sub here because self.offset2 is not
- # defined as self._offset(2)
- off = self.offset2
- msg = "Cannot subtract datetime from offset"
- with pytest.raises(TypeError, match=msg):
- off - self.d
- assert 2 * off - off == off
-
- assert self.d - self.offset2 == self.d + self._offset(-3)
-
- def testRollback1(self):
- assert self.offset1.rollback(self.d) == self.d
- assert self.offset2.rollback(self.d) == self.d
- assert self.offset3.rollback(self.d) == self.d
- assert self.offset4.rollback(self.d) == self.d
- assert self.offset5.rollback(self.d) == datetime(2014, 6, 30, 14, 30)
- assert self.offset6.rollback(self.d) == datetime(2014, 7, 1, 5, 0)
- assert self.offset7.rollback(self.d) == datetime(2014, 7, 1, 6, 30)
- assert self.offset8.rollback(self.d) == self.d
- assert self.offset9.rollback(self.d) == self.d
- assert self.offset10.rollback(self.d) == datetime(2014, 7, 1, 2)
-
- d = datetime(2014, 7, 1, 0)
- assert self.offset1.rollback(d) == datetime(2014, 6, 30, 17)
- assert self.offset2.rollback(d) == datetime(2014, 6, 30, 17)
- assert self.offset3.rollback(d) == datetime(2014, 6, 30, 17)
- assert self.offset4.rollback(d) == datetime(2014, 6, 30, 17)
- assert self.offset5.rollback(d) == datetime(2014, 6, 30, 14, 30)
- assert self.offset6.rollback(d) == d
- assert self.offset7.rollback(d) == d
- assert self.offset8.rollback(d) == datetime(2014, 6, 30, 17)
- assert self.offset9.rollback(d) == d
- assert self.offset10.rollback(d) == d
-
- assert self._offset(5).rollback(self.d) == self.d
-
- def testRollback2(self):
- assert self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) == datetime(
- 2014, 7, 4, 17, 0
- )
-
- def testRollforward1(self):
- assert self.offset1.rollforward(self.d) == self.d
- assert self.offset2.rollforward(self.d) == self.d
- assert self.offset3.rollforward(self.d) == self.d
- assert self.offset4.rollforward(self.d) == self.d
- assert self.offset5.rollforward(self.d) == datetime(2014, 7, 1, 11, 0)
- assert self.offset6.rollforward(self.d) == datetime(2014, 7, 1, 20, 0)
- assert self.offset7.rollforward(self.d) == datetime(2014, 7, 1, 21, 30)
- assert self.offset8.rollforward(self.d) == self.d
- assert self.offset9.rollforward(self.d) == self.d
- assert self.offset10.rollforward(self.d) == datetime(2014, 7, 1, 13)
-
- d = datetime(2014, 7, 1, 0)
- assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
- assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
- assert self.offset3.rollforward(d) == datetime(2014, 7, 1, 9)
- assert self.offset4.rollforward(d) == datetime(2014, 7, 1, 9)
- assert self.offset5.rollforward(d) == datetime(2014, 7, 1, 11)
- assert self.offset6.rollforward(d) == d
- assert self.offset7.rollforward(d) == d
- assert self.offset8.rollforward(d) == datetime(2014, 7, 1, 9)
- assert self.offset9.rollforward(d) == d
- assert self.offset10.rollforward(d) == d
-
- assert self._offset(5).rollforward(self.d) == self.d
-
- def testRollforward2(self):
- assert self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) == datetime(
- 2014, 7, 7, 9
- )
-
- def test_roll_date_object(self):
- offset = BusinessHour()
-
- dt = datetime(2014, 7, 6, 15, 0)
-
- result = offset.rollback(dt)
- assert result == datetime(2014, 7, 4, 17)
-
- result = offset.rollforward(dt)
- assert result == datetime(2014, 7, 7, 9)
-
- normalize_cases = []
- normalize_cases.append(
- (
- BusinessHour(normalize=True),
- {
- datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 2),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 2),
- datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
- datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
- datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
- datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
- datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
- datetime(2014, 7, 6, 10): datetime(2014, 7, 7),
- },
- )
- )
-
- normalize_cases.append(
- (
- BusinessHour(-1, normalize=True),
- {
- datetime(2014, 7, 1, 8): datetime(2014, 6, 30),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 10): datetime(2014, 6, 30),
- datetime(2014, 7, 1, 0): datetime(2014, 6, 30),
- datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
- datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
- datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
- datetime(2014, 7, 6, 10): datetime(2014, 7, 4),
- },
- )
- )
-
- normalize_cases.append(
- (
- BusinessHour(1, normalize=True, start="17:00", end="04:00"),
- {
- datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
- datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
- datetime(2014, 7, 2, 3): datetime(2014, 7, 2),
- datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
- datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
- datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
- datetime(2014, 7, 7, 17): datetime(2014, 7, 7),
- },
- )
- )
-
- @pytest.mark.parametrize("case", normalize_cases)
- def test_normalize(self, case):
- offset, cases = case
- for dt, expected in cases.items():
- assert offset.apply(dt) == expected
-
- on_offset_cases = []
- on_offset_cases.append(
- (
- BusinessHour(),
- {
- datetime(2014, 7, 1, 9): True,
- datetime(2014, 7, 1, 8, 59): False,
- datetime(2014, 7, 1, 8): False,
- datetime(2014, 7, 1, 17): True,
- datetime(2014, 7, 1, 17, 1): False,
- datetime(2014, 7, 1, 18): False,
- datetime(2014, 7, 5, 9): False,
- datetime(2014, 7, 6, 12): False,
- },
- )
- )
-
- on_offset_cases.append(
- (
- BusinessHour(start="10:00", end="15:00"),
- {
- datetime(2014, 7, 1, 9): False,
- datetime(2014, 7, 1, 10): True,
- datetime(2014, 7, 1, 15): True,
- datetime(2014, 7, 1, 15, 1): False,
- datetime(2014, 7, 5, 12): False,
- datetime(2014, 7, 6, 12): False,
- },
- )
- )
-
- on_offset_cases.append(
- (
- BusinessHour(start="19:00", end="05:00"),
- {
- datetime(2014, 7, 1, 9, 0): False,
- datetime(2014, 7, 1, 10, 0): False,
- datetime(2014, 7, 1, 15): False,
- datetime(2014, 7, 1, 15, 1): False,
- datetime(2014, 7, 5, 12, 0): False,
- datetime(2014, 7, 6, 12, 0): False,
- datetime(2014, 7, 1, 19, 0): True,
- datetime(2014, 7, 2, 0, 0): True,
- datetime(2014, 7, 4, 23): True,
- datetime(2014, 7, 5, 1): True,
- datetime(2014, 7, 5, 5, 0): True,
- datetime(2014, 7, 6, 23, 0): False,
- datetime(2014, 7, 7, 3, 0): False,
- },
- )
- )
-
- on_offset_cases.append(
- (
- BusinessHour(start=["09:00", "13:00"], end=["12:00", "17:00"]),
- {
- datetime(2014, 7, 1, 9): True,
- datetime(2014, 7, 1, 8, 59): False,
- datetime(2014, 7, 1, 8): False,
- datetime(2014, 7, 1, 17): True,
- datetime(2014, 7, 1, 17, 1): False,
- datetime(2014, 7, 1, 18): False,
- datetime(2014, 7, 5, 9): False,
- datetime(2014, 7, 6, 12): False,
- datetime(2014, 7, 1, 12, 30): False,
- },
- )
- )
-
- on_offset_cases.append(
- (
- BusinessHour(start=["19:00", "23:00"], end=["21:00", "05:00"]),
- {
- datetime(2014, 7, 1, 9, 0): False,
- datetime(2014, 7, 1, 10, 0): False,
- datetime(2014, 7, 1, 15): False,
- datetime(2014, 7, 1, 15, 1): False,
- datetime(2014, 7, 5, 12, 0): False,
- datetime(2014, 7, 6, 12, 0): False,
- datetime(2014, 7, 1, 19, 0): True,
- datetime(2014, 7, 2, 0, 0): True,
- datetime(2014, 7, 4, 23): True,
- datetime(2014, 7, 5, 1): True,
- datetime(2014, 7, 5, 5, 0): True,
- datetime(2014, 7, 6, 23, 0): False,
- datetime(2014, 7, 7, 3, 0): False,
- datetime(2014, 7, 4, 22): False,
- },
- )
- )
-
- @pytest.mark.parametrize("case", on_offset_cases)
- def test_is_on_offset(self, case):
- offset, cases = case
- for dt, expected in cases.items():
- assert offset.is_on_offset(dt) == expected
-
- opening_time_cases = []
- # opening time should be affected by sign of n, not by n's value and
- # end
- opening_time_cases.append(
- (
- [
- BusinessHour(),
- BusinessHour(n=2),
- BusinessHour(n=4),
- BusinessHour(end="10:00"),
- BusinessHour(n=2, end="4:00"),
- BusinessHour(n=4, end="15:00"),
- ],
- {
- datetime(2014, 7, 1, 11): (
- datetime(2014, 7, 2, 9),
- datetime(2014, 7, 1, 9),
- ),
- datetime(2014, 7, 1, 18): (
- datetime(2014, 7, 2, 9),
- datetime(2014, 7, 1, 9),
- ),
- datetime(2014, 7, 1, 23): (
- datetime(2014, 7, 2, 9),
- datetime(2014, 7, 1, 9),
- ),
- datetime(2014, 7, 2, 8): (
- datetime(2014, 7, 2, 9),
- datetime(2014, 7, 1, 9),
- ),
- # if timestamp is on opening time, next opening time is
- # as it is
- datetime(2014, 7, 2, 9): (
- datetime(2014, 7, 2, 9),
- datetime(2014, 7, 2, 9),
- ),
- datetime(2014, 7, 2, 10): (
- datetime(2014, 7, 3, 9),
- datetime(2014, 7, 2, 9),
- ),
- # 2014-07-05 is saturday
- datetime(2014, 7, 5, 10): (
- datetime(2014, 7, 7, 9),
- datetime(2014, 7, 4, 9),
- ),
- datetime(2014, 7, 4, 10): (
- datetime(2014, 7, 7, 9),
- datetime(2014, 7, 4, 9),
- ),
- datetime(2014, 7, 4, 23): (
- datetime(2014, 7, 7, 9),
- datetime(2014, 7, 4, 9),
- ),
- datetime(2014, 7, 6, 10): (
- datetime(2014, 7, 7, 9),
- datetime(2014, 7, 4, 9),
- ),
- datetime(2014, 7, 7, 5): (
- datetime(2014, 7, 7, 9),
- datetime(2014, 7, 4, 9),
- ),
- datetime(2014, 7, 7, 9, 1): (
- datetime(2014, 7, 8, 9),
- datetime(2014, 7, 7, 9),
- ),
- },
- )
- )
-
- opening_time_cases.append(
- (
- [
- BusinessHour(start="11:15"),
- BusinessHour(n=2, start="11:15"),
- BusinessHour(n=3, start="11:15"),
- BusinessHour(start="11:15", end="10:00"),
- BusinessHour(n=2, start="11:15", end="4:00"),
- BusinessHour(n=3, start="11:15", end="15:00"),
- ],
- {
- datetime(2014, 7, 1, 11): (
- datetime(2014, 7, 1, 11, 15),
- datetime(2014, 6, 30, 11, 15),
- ),
- datetime(2014, 7, 1, 18): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 11, 15),
- ),
- datetime(2014, 7, 1, 23): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 11, 15),
- ),
- datetime(2014, 7, 2, 8): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 11, 15),
- ),
- datetime(2014, 7, 2, 9): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 11, 15),
- ),
- datetime(2014, 7, 2, 10): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 11, 15),
- ),
- datetime(2014, 7, 2, 11, 15): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 2, 11, 15),
- ),
- datetime(2014, 7, 2, 11, 15, 1): (
- datetime(2014, 7, 3, 11, 15),
- datetime(2014, 7, 2, 11, 15),
- ),
- datetime(2014, 7, 5, 10): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 11, 15),
- ),
- datetime(2014, 7, 4, 10): (
- datetime(2014, 7, 4, 11, 15),
- datetime(2014, 7, 3, 11, 15),
- ),
- datetime(2014, 7, 4, 23): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 11, 15),
- ),
- datetime(2014, 7, 6, 10): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 11, 15),
- ),
- datetime(2014, 7, 7, 5): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 11, 15),
- ),
- datetime(2014, 7, 7, 9, 1): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 11, 15),
- ),
- },
- )
- )
-
- opening_time_cases.append(
- (
- [
- BusinessHour(-1),
- BusinessHour(n=-2),
- BusinessHour(n=-4),
- BusinessHour(n=-1, end="10:00"),
- BusinessHour(n=-2, end="4:00"),
- BusinessHour(n=-4, end="15:00"),
- ],
- {
- datetime(2014, 7, 1, 11): (
- datetime(2014, 7, 1, 9),
- datetime(2014, 7, 2, 9),
- ),
- datetime(2014, 7, 1, 18): (
- datetime(2014, 7, 1, 9),
- datetime(2014, 7, 2, 9),
- ),
- datetime(2014, 7, 1, 23): (
- datetime(2014, 7, 1, 9),
- datetime(2014, 7, 2, 9),
- ),
- datetime(2014, 7, 2, 8): (
- datetime(2014, 7, 1, 9),
- datetime(2014, 7, 2, 9),
- ),
- datetime(2014, 7, 2, 9): (
- datetime(2014, 7, 2, 9),
- datetime(2014, 7, 2, 9),
- ),
- datetime(2014, 7, 2, 10): (
- datetime(2014, 7, 2, 9),
- datetime(2014, 7, 3, 9),
- ),
- datetime(2014, 7, 5, 10): (
- datetime(2014, 7, 4, 9),
- datetime(2014, 7, 7, 9),
- ),
- datetime(2014, 7, 4, 10): (
- datetime(2014, 7, 4, 9),
- datetime(2014, 7, 7, 9),
- ),
- datetime(2014, 7, 4, 23): (
- datetime(2014, 7, 4, 9),
- datetime(2014, 7, 7, 9),
- ),
- datetime(2014, 7, 6, 10): (
- datetime(2014, 7, 4, 9),
- datetime(2014, 7, 7, 9),
- ),
- datetime(2014, 7, 7, 5): (
- datetime(2014, 7, 4, 9),
- datetime(2014, 7, 7, 9),
- ),
- datetime(2014, 7, 7, 9): (
- datetime(2014, 7, 7, 9),
- datetime(2014, 7, 7, 9),
- ),
- datetime(2014, 7, 7, 9, 1): (
- datetime(2014, 7, 7, 9),
- datetime(2014, 7, 8, 9),
- ),
- },
- )
- )
-
- opening_time_cases.append(
- (
- [
- BusinessHour(start="17:00", end="05:00"),
- BusinessHour(n=3, start="17:00", end="03:00"),
- ],
- {
- datetime(2014, 7, 1, 11): (
- datetime(2014, 7, 1, 17),
- datetime(2014, 6, 30, 17),
- ),
- datetime(2014, 7, 1, 18): (
- datetime(2014, 7, 2, 17),
- datetime(2014, 7, 1, 17),
- ),
- datetime(2014, 7, 1, 23): (
- datetime(2014, 7, 2, 17),
- datetime(2014, 7, 1, 17),
- ),
- datetime(2014, 7, 2, 8): (
- datetime(2014, 7, 2, 17),
- datetime(2014, 7, 1, 17),
- ),
- datetime(2014, 7, 2, 9): (
- datetime(2014, 7, 2, 17),
- datetime(2014, 7, 1, 17),
- ),
- datetime(2014, 7, 4, 17): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 4, 17),
- ),
- datetime(2014, 7, 5, 10): (
- datetime(2014, 7, 7, 17),
- datetime(2014, 7, 4, 17),
- ),
- datetime(2014, 7, 4, 10): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 3, 17),
- ),
- datetime(2014, 7, 4, 23): (
- datetime(2014, 7, 7, 17),
- datetime(2014, 7, 4, 17),
- ),
- datetime(2014, 7, 6, 10): (
- datetime(2014, 7, 7, 17),
- datetime(2014, 7, 4, 17),
- ),
- datetime(2014, 7, 7, 5): (
- datetime(2014, 7, 7, 17),
- datetime(2014, 7, 4, 17),
- ),
- datetime(2014, 7, 7, 17, 1): (
- datetime(2014, 7, 8, 17),
- datetime(2014, 7, 7, 17),
- ),
- },
- )
- )
-
- opening_time_cases.append(
- (
- [
- BusinessHour(-1, start="17:00", end="05:00"),
- BusinessHour(n=-2, start="17:00", end="03:00"),
- ],
- {
- datetime(2014, 7, 1, 11): (
- datetime(2014, 6, 30, 17),
- datetime(2014, 7, 1, 17),
- ),
- datetime(2014, 7, 1, 18): (
- datetime(2014, 7, 1, 17),
- datetime(2014, 7, 2, 17),
- ),
- datetime(2014, 7, 1, 23): (
- datetime(2014, 7, 1, 17),
- datetime(2014, 7, 2, 17),
- ),
- datetime(2014, 7, 2, 8): (
- datetime(2014, 7, 1, 17),
- datetime(2014, 7, 2, 17),
- ),
- datetime(2014, 7, 2, 9): (
- datetime(2014, 7, 1, 17),
- datetime(2014, 7, 2, 17),
- ),
- datetime(2014, 7, 2, 16, 59): (
- datetime(2014, 7, 1, 17),
- datetime(2014, 7, 2, 17),
- ),
- datetime(2014, 7, 5, 10): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 17),
- ),
- datetime(2014, 7, 4, 10): (
- datetime(2014, 7, 3, 17),
- datetime(2014, 7, 4, 17),
- ),
- datetime(2014, 7, 4, 23): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 17),
- ),
- datetime(2014, 7, 6, 10): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 17),
- ),
- datetime(2014, 7, 7, 5): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 17),
- ),
- datetime(2014, 7, 7, 18): (
- datetime(2014, 7, 7, 17),
- datetime(2014, 7, 8, 17),
- ),
- },
- )
- )
-
- opening_time_cases.append(
- (
- [
- BusinessHour(start=["11:15", "15:00"], end=["13:00", "20:00"]),
- BusinessHour(n=3, start=["11:15", "15:00"], end=["12:00", "20:00"]),
- BusinessHour(start=["11:15", "15:00"], end=["13:00", "17:00"]),
- BusinessHour(n=2, start=["11:15", "15:00"], end=["12:00", "03:00"]),
- BusinessHour(n=3, start=["11:15", "15:00"], end=["13:00", "16:00"]),
- ],
- {
- datetime(2014, 7, 1, 11): (
- datetime(2014, 7, 1, 11, 15),
- datetime(2014, 6, 30, 15),
- ),
- datetime(2014, 7, 1, 18): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 15),
- ),
- datetime(2014, 7, 1, 23): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 15),
- ),
- datetime(2014, 7, 2, 8): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 15),
- ),
- datetime(2014, 7, 2, 9): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 15),
- ),
- datetime(2014, 7, 2, 10): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 15),
- ),
- datetime(2014, 7, 2, 11, 15): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 2, 11, 15),
- ),
- datetime(2014, 7, 2, 11, 15, 1): (
- datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 11, 15),
- ),
- datetime(2014, 7, 5, 10): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 15),
- ),
- datetime(2014, 7, 4, 10): (
- datetime(2014, 7, 4, 11, 15),
- datetime(2014, 7, 3, 15),
- ),
- datetime(2014, 7, 4, 23): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 15),
- ),
- datetime(2014, 7, 6, 10): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 15),
- ),
- datetime(2014, 7, 7, 5): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 15),
- ),
- datetime(2014, 7, 7, 9, 1): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 15),
- ),
- datetime(2014, 7, 7, 12): (
- datetime(2014, 7, 7, 15),
- datetime(2014, 7, 7, 11, 15),
- ),
- },
- )
- )
-
- opening_time_cases.append(
- (
- [
- BusinessHour(n=-1, start=["17:00", "08:00"], end=["05:00", "10:00"]),
- BusinessHour(n=-2, start=["08:00", "17:00"], end=["10:00", "03:00"]),
- ],
- {
- datetime(2014, 7, 1, 11): (
- datetime(2014, 7, 1, 8),
- datetime(2014, 7, 1, 17),
- ),
- datetime(2014, 7, 1, 18): (
- datetime(2014, 7, 1, 17),
- datetime(2014, 7, 2, 8),
- ),
- datetime(2014, 7, 1, 23): (
- datetime(2014, 7, 1, 17),
- datetime(2014, 7, 2, 8),
- ),
- datetime(2014, 7, 2, 8): (
- datetime(2014, 7, 2, 8),
- datetime(2014, 7, 2, 8),
- ),
- datetime(2014, 7, 2, 9): (
- datetime(2014, 7, 2, 8),
- datetime(2014, 7, 2, 17),
- ),
- datetime(2014, 7, 2, 16, 59): (
- datetime(2014, 7, 2, 8),
- datetime(2014, 7, 2, 17),
- ),
- datetime(2014, 7, 5, 10): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 8),
- ),
- datetime(2014, 7, 4, 10): (
- datetime(2014, 7, 4, 8),
- datetime(2014, 7, 4, 17),
- ),
- datetime(2014, 7, 4, 23): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 8),
- ),
- datetime(2014, 7, 6, 10): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 8),
- ),
- datetime(2014, 7, 7, 5): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 8),
- ),
- datetime(2014, 7, 7, 18): (
- datetime(2014, 7, 7, 17),
- datetime(2014, 7, 8, 8),
- ),
- },
- )
- )
-
- @pytest.mark.parametrize("case", opening_time_cases)
- def test_opening_time(self, case):
- _offsets, cases = case
- for offset in _offsets:
- for dt, (exp_next, exp_prev) in cases.items():
- assert offset._next_opening_time(dt) == exp_next
- assert offset._prev_opening_time(dt) == exp_prev
-
- apply_cases = []
- apply_cases.append(
- (
- BusinessHour(),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 9),
- datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 2, 9, 30, 15),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 10),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 12),
- # out of business hours
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
- # saturday
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
- datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(4),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 11),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 12),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
- datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(-1),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 10),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 12),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 14),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 1, 10): datetime(2014, 6, 30, 17),
- datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
- datetime(2014, 7, 1, 9, 30, 15): datetime(2014, 6, 30, 16, 30, 15),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 1, 5): datetime(2014, 6, 30, 16),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 10),
- # out of business hours
- datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 16),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 16),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 16),
- # saturday
- datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 16),
- datetime(2014, 7, 7, 9): datetime(2014, 7, 4, 16),
- datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 16, 30),
- datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 16, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(-4),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 15),
- datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 11),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 12),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
- datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 13),
- datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 13, 30),
- datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 13, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(start="13:00", end="16:00"),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 14),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 14),
- datetime(2014, 7, 1, 15, 30, 15): datetime(2014, 7, 2, 13, 30, 15),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 14),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(n=2, start="13:00", end="16:00"),
- {
- datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 14): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
- datetime(2014, 7, 2, 14, 30): datetime(2014, 7, 3, 13, 30),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 15),
- datetime(2014, 7, 4, 14, 30): datetime(2014, 7, 7, 13, 30),
- datetime(2014, 7, 4, 14, 30, 30): datetime(2014, 7, 7, 13, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(n=-1, start="13:00", end="16:00"),
- {
- datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 2, 14): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 2, 15): datetime(2014, 7, 2, 14),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 16): datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 13, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 15),
- datetime(2014, 7, 7, 11): datetime(2014, 7, 4, 15),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(n=-3, start="10:00", end="16:00"),
- {
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
- datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 11),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
- datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 2, 11, 30): datetime(2014, 7, 1, 14, 30),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 4, 10): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
- datetime(2014, 7, 4, 16): datetime(2014, 7, 4, 13),
- datetime(2014, 7, 4, 12, 30): datetime(2014, 7, 3, 15, 30),
- datetime(2014, 7, 4, 12, 30, 30): datetime(2014, 7, 3, 15, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(start="19:00", end="05:00"),
- {
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 20),
- datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 20),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 20),
- datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 20),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 20),
- datetime(2014, 7, 2, 4, 30): datetime(2014, 7, 2, 19, 30),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 1),
- datetime(2014, 7, 4, 10): datetime(2014, 7, 4, 20),
- datetime(2014, 7, 4, 23): datetime(2014, 7, 5, 0),
- datetime(2014, 7, 5, 0): datetime(2014, 7, 5, 1),
- datetime(2014, 7, 5, 4): datetime(2014, 7, 7, 19),
- datetime(2014, 7, 5, 4, 30): datetime(2014, 7, 7, 19, 30),
- datetime(2014, 7, 5, 4, 30, 30): datetime(2014, 7, 7, 19, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(n=-1, start="19:00", end="05:00"),
- {
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
- datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
- datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
- datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
- datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
- datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 3),
- datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 5, 4, 30),
- datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 5, 4, 30, 30),
- },
- )
- )
-
- # long business hours (see gh-26381)
- apply_cases.append(
- (
- BusinessHour(n=4, start="00:00", end="23:00"),
- {
- datetime(2014, 7, 3, 22): datetime(2014, 7, 4, 3),
- datetime(2014, 7, 4, 22): datetime(2014, 7, 7, 3),
- datetime(2014, 7, 3, 22, 30): datetime(2014, 7, 4, 3, 30),
- datetime(2014, 7, 3, 22, 20): datetime(2014, 7, 4, 3, 20),
- datetime(2014, 7, 4, 22, 30, 30): datetime(2014, 7, 7, 3, 30, 30),
- datetime(2014, 7, 4, 22, 30, 20): datetime(2014, 7, 7, 3, 30, 20),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(n=-4, start="00:00", end="23:00"),
- {
- datetime(2014, 7, 4, 3): datetime(2014, 7, 3, 22),
- datetime(2014, 7, 7, 3): datetime(2014, 7, 4, 22),
- datetime(2014, 7, 4, 3, 30): datetime(2014, 7, 3, 22, 30),
- datetime(2014, 7, 4, 3, 20): datetime(2014, 7, 3, 22, 20),
- datetime(2014, 7, 7, 3, 30, 30): datetime(2014, 7, 4, 22, 30, 30),
- datetime(2014, 7, 7, 3, 30, 20): datetime(2014, 7, 4, 22, 30, 20),
- },
- )
- )
-
- # multiple business hours
- apply_cases.append(
- (
- BusinessHour(start=["09:00", "14:00"], end=["12:00", "18:00"]),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 17),
- datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 17, 30, 15),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 9),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 14),
- # out of business hours
- datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
- # saturday
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 9),
- datetime(2014, 7, 4, 17, 30): datetime(2014, 7, 7, 9, 30),
- datetime(2014, 7, 4, 17, 30, 30): datetime(2014, 7, 7, 9, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(n=4, start=["09:00", "14:00"], end=["12:00", "18:00"]),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 17),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 10),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 11),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 14),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 17),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 15),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 11, 30),
- datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 11, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(n=-4, start=["09:00", "14:00"], end=["12:00", "18:00"]),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 16),
- datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
- datetime(2014, 7, 1, 15): datetime(2014, 6, 30, 18),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 10),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 11),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 12),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 12),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 12),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 12),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 12),
- datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 12),
- datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 14, 30),
- datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 14, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(n=-1, start=["19:00", "03:00"], end=["01:00", "05:00"]),
- {
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
- datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 4): datetime(2014, 7, 2, 1),
- datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
- datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
- datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
- datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
- datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 0),
- datetime(2014, 7, 7, 3, 30): datetime(2014, 7, 5, 0, 30),
- datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 7, 4, 30),
- datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 7, 4, 30, 30),
- },
- )
- )
-
- @pytest.mark.parametrize("case", apply_cases)
- def test_apply(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- apply_large_n_cases = []
- # A week later
- apply_large_n_cases.append(
- (
- BusinessHour(40),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 8, 11),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 8, 13),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 8, 15),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 8, 16),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 9, 9),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 9, 11),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 9, 9),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 10, 9),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 10, 9),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 10, 9),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 9),
- datetime(2014, 7, 4, 18): datetime(2014, 7, 14, 9),
- datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 14, 9, 30),
- datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 14, 9, 30, 30),
- },
- )
- )
-
- # 3 days and 1 hour before
- apply_large_n_cases.append(
- (
- BusinessHour(-25),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
- datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 12),
- datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 16),
- datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 17),
- datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
- datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 16),
- datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 16),
- datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 16),
- datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 16),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 16, 30),
- datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30),
- },
- )
- )
-
- # 5 days and 3 hours later
- apply_large_n_cases.append(
- (
- BusinessHour(28, start="21:00", end="02:00"),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
- datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 1),
- datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
- datetime(2014, 7, 2, 2): datetime(2014, 7, 10, 0),
- datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
- datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
- datetime(2014, 7, 4, 2): datetime(2014, 7, 12, 0),
- datetime(2014, 7, 4, 3): datetime(2014, 7, 12, 0),
- datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 15, 0),
- datetime(2014, 7, 6, 18): datetime(2014, 7, 15, 0),
- datetime(2014, 7, 7, 1): datetime(2014, 7, 15, 0),
- datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30),
- },
- )
- )
-
- # large n for multiple opening hours (3 days and 1 hour before)
- apply_large_n_cases.append(
- (
- BusinessHour(n=-25, start=["09:00", "14:00"], end=["12:00", "19:00"]),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
- datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 11),
- datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 18),
- datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 19),
- datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
- datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 18),
- datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 18),
- datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 18),
- datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 18),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 18),
- datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 18),
- datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 18, 30),
- datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30),
- },
- )
- )
-
- # 5 days and 3 hours later
- apply_large_n_cases.append(
- (
- BusinessHour(28, start=["21:00", "03:00"], end=["01:00", "04:00"]),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
- datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 3),
- datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
- datetime(2014, 7, 2, 2): datetime(2014, 7, 9, 23),
- datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
- datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
- datetime(2014, 7, 4, 2): datetime(2014, 7, 11, 23),
- datetime(2014, 7, 4, 3): datetime(2014, 7, 11, 23),
- datetime(2014, 7, 4, 21): datetime(2014, 7, 12, 0),
- datetime(2014, 7, 5, 0): datetime(2014, 7, 14, 22),
- datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 23),
- datetime(2014, 7, 6, 18): datetime(2014, 7, 14, 23),
- datetime(2014, 7, 7, 1): datetime(2014, 7, 14, 23),
- datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30),
- },
- )
- )
-
- @pytest.mark.parametrize("case", apply_large_n_cases)
- def test_apply_large_n(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- def test_apply_nanoseconds(self):
- tests = []
-
- tests.append(
- (
- BusinessHour(),
- {
- Timestamp("2014-07-04 15:00")
- + Nano(5): Timestamp("2014-07-04 16:00")
- + Nano(5),
- Timestamp("2014-07-04 16:00")
- + Nano(5): Timestamp("2014-07-07 09:00")
- + Nano(5),
- Timestamp("2014-07-04 16:00")
- - Nano(5): Timestamp("2014-07-04 17:00")
- - Nano(5),
- },
- )
- )
-
- tests.append(
- (
- BusinessHour(-1),
- {
- Timestamp("2014-07-04 15:00")
- + Nano(5): Timestamp("2014-07-04 14:00")
- + Nano(5),
- Timestamp("2014-07-04 10:00")
- + Nano(5): Timestamp("2014-07-04 09:00")
- + Nano(5),
- Timestamp("2014-07-04 10:00")
- - Nano(5): Timestamp("2014-07-03 17:00")
- - Nano(5),
- },
- )
- )
-
- for offset, cases in tests:
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- def test_datetimeindex(self):
- idx1 = date_range(start="2014-07-04 15:00", end="2014-07-08 10:00", freq="BH")
- idx2 = date_range(start="2014-07-04 15:00", periods=12, freq="BH")
- idx3 = date_range(end="2014-07-08 10:00", periods=12, freq="BH")
- expected = DatetimeIndex(
- [
- "2014-07-04 15:00",
- "2014-07-04 16:00",
- "2014-07-07 09:00",
- "2014-07-07 10:00",
- "2014-07-07 11:00",
- "2014-07-07 12:00",
- "2014-07-07 13:00",
- "2014-07-07 14:00",
- "2014-07-07 15:00",
- "2014-07-07 16:00",
- "2014-07-08 09:00",
- "2014-07-08 10:00",
- ],
- freq="BH",
- )
- for idx in [idx1, idx2, idx3]:
- tm.assert_index_equal(idx, expected)
-
- idx1 = date_range(start="2014-07-04 15:45", end="2014-07-08 10:45", freq="BH")
- idx2 = date_range(start="2014-07-04 15:45", periods=12, freq="BH")
- idx3 = date_range(end="2014-07-08 10:45", periods=12, freq="BH")
-
- expected = DatetimeIndex(
- [
- "2014-07-04 15:45",
- "2014-07-04 16:45",
- "2014-07-07 09:45",
- "2014-07-07 10:45",
- "2014-07-07 11:45",
- "2014-07-07 12:45",
- "2014-07-07 13:45",
- "2014-07-07 14:45",
- "2014-07-07 15:45",
- "2014-07-07 16:45",
- "2014-07-08 09:45",
- "2014-07-08 10:45",
- ],
- freq="BH",
- )
- expected = idx1
- for idx in [idx1, idx2, idx3]:
- tm.assert_index_equal(idx, expected)
-
- def test_bday_ignores_timedeltas(self):
- idx = date_range("2010/02/01", "2010/02/10", freq="12H")
- t1 = idx + BDay(offset=Timedelta(3, unit="H"))
-
- expected = DatetimeIndex(
- [
- "2010-02-02 03:00:00",
- "2010-02-02 15:00:00",
- "2010-02-03 03:00:00",
- "2010-02-03 15:00:00",
- "2010-02-04 03:00:00",
- "2010-02-04 15:00:00",
- "2010-02-05 03:00:00",
- "2010-02-05 15:00:00",
- "2010-02-08 03:00:00",
- "2010-02-08 15:00:00",
- "2010-02-08 03:00:00",
- "2010-02-08 15:00:00",
- "2010-02-08 03:00:00",
- "2010-02-08 15:00:00",
- "2010-02-09 03:00:00",
- "2010-02-09 15:00:00",
- "2010-02-10 03:00:00",
- "2010-02-10 15:00:00",
- "2010-02-11 03:00:00",
- ],
- freq=None,
- )
- tm.assert_index_equal(t1, expected)
-
-
-class TestCustomBusinessHour(Base):
- _offset = CustomBusinessHour
- holidays = ["2014-06-27", datetime(2014, 6, 30), np.datetime64("2014-07-02")]
-
- def setup_method(self, method):
- # 2014 Calendar to check custom holidays
- # Sun Mon Tue Wed Thu Fri Sat
- # 6/22 23 24 25 26 27 28
- # 29 30 7/1 2 3 4 5
- # 6 7 8 9 10 11 12
- self.d = datetime(2014, 7, 1, 10, 00)
- self.offset1 = CustomBusinessHour(weekmask="Tue Wed Thu Fri")
-
- self.offset2 = CustomBusinessHour(holidays=self.holidays)
-
- def test_constructor_errors(self):
- from datetime import time as dt_time
-
- msg = "time data must be specified only with hour and minute"
- with pytest.raises(ValueError, match=msg):
- CustomBusinessHour(start=dt_time(11, 0, 5))
- msg = "time data must match '%H:%M' format"
- with pytest.raises(ValueError, match=msg):
- CustomBusinessHour(start="AAA")
- msg = "time data must match '%H:%M' format"
- with pytest.raises(ValueError, match=msg):
- CustomBusinessHour(start="14:00:05")
-
- def test_different_normalize_equals(self):
- # GH#21404 changed __eq__ to return False when `normalize` does not match
- offset = self._offset()
- offset2 = self._offset(normalize=True)
- assert offset != offset2
-
- def test_repr(self):
- assert repr(self.offset1) == "<CustomBusinessHour: CBH=09:00-17:00>"
- assert repr(self.offset2) == "<CustomBusinessHour: CBH=09:00-17:00>"
-
- def test_with_offset(self):
- expected = Timestamp("2014-07-01 13:00")
-
- assert self.d + CustomBusinessHour() * 3 == expected
- assert self.d + CustomBusinessHour(n=3) == expected
-
- def test_eq(self):
- for offset in [self.offset1, self.offset2]:
- assert offset == offset
-
- assert CustomBusinessHour() != CustomBusinessHour(-1)
- assert CustomBusinessHour(start="09:00") == CustomBusinessHour()
- assert CustomBusinessHour(start="09:00") != CustomBusinessHour(start="09:01")
- assert CustomBusinessHour(start="09:00", end="17:00") != CustomBusinessHour(
- start="17:00", end="09:01"
- )
-
- assert CustomBusinessHour(weekmask="Tue Wed Thu Fri") != CustomBusinessHour(
- weekmask="Mon Tue Wed Thu Fri"
- )
- assert CustomBusinessHour(holidays=["2014-06-27"]) != CustomBusinessHour(
- holidays=["2014-06-28"]
- )
-
- def test_sub(self):
- # override the Base.test_sub implementation because self.offset2 is
- # defined differently in this class than the test expects
- pass
-
- def test_hash(self):
- assert hash(self.offset1) == hash(self.offset1)
- assert hash(self.offset2) == hash(self.offset2)
-
- def test_call(self):
- with tm.assert_produces_warning(FutureWarning):
- # GH#34171 DateOffset.__call__ is deprecated
- assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
- assert self.offset2(self.d) == datetime(2014, 7, 1, 11)
-
- def testRollback1(self):
- assert self.offset1.rollback(self.d) == self.d
- assert self.offset2.rollback(self.d) == self.d
-
- d = datetime(2014, 7, 1, 0)
-
- # 2014/07/01 is Tuesday, 06/30 is Monday(holiday)
- assert self.offset1.rollback(d) == datetime(2014, 6, 27, 17)
-
- # 2014/6/30 and 2014/6/27 are holidays
- assert self.offset2.rollback(d) == datetime(2014, 6, 26, 17)
-
- def testRollback2(self):
- assert self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) == datetime(
- 2014, 7, 4, 17, 0
- )
-
- def testRollforward1(self):
- assert self.offset1.rollforward(self.d) == self.d
- assert self.offset2.rollforward(self.d) == self.d
-
- d = datetime(2014, 7, 1, 0)
- assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
- assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
-
- def testRollforward2(self):
- assert self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) == datetime(
- 2014, 7, 7, 9
- )
-
- def test_roll_date_object(self):
- offset = BusinessHour()
-
- dt = datetime(2014, 7, 6, 15, 0)
-
- result = offset.rollback(dt)
- assert result == datetime(2014, 7, 4, 17)
-
- result = offset.rollforward(dt)
- assert result == datetime(2014, 7, 7, 9)
-
- normalize_cases = []
- normalize_cases.append(
- (
- CustomBusinessHour(normalize=True, holidays=holidays),
- {
- datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 3),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 3),
- datetime(2014, 7, 1, 23): datetime(2014, 7, 3),
- datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
- datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
- datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
- datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
- datetime(2014, 7, 6, 10): datetime(2014, 7, 7),
- },
- )
- )
-
- normalize_cases.append(
- (
- CustomBusinessHour(-1, normalize=True, holidays=holidays),
- {
- datetime(2014, 7, 1, 8): datetime(2014, 6, 26),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 10): datetime(2014, 6, 26),
- datetime(2014, 7, 1, 0): datetime(2014, 6, 26),
- datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
- datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
- datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
- datetime(2014, 7, 6, 10): datetime(2014, 7, 4),
- },
- )
- )
-
- normalize_cases.append(
- (
- CustomBusinessHour(
- 1, normalize=True, start="17:00", end="04:00", holidays=holidays
- ),
- {
- datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
- datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
- datetime(2014, 7, 2, 3): datetime(2014, 7, 3),
- datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
- datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
- datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
- datetime(2014, 7, 7, 17): datetime(2014, 7, 7),
- },
- )
- )
-
- @pytest.mark.parametrize("norm_cases", normalize_cases)
- def test_normalize(self, norm_cases):
- offset, cases = norm_cases
- for dt, expected in cases.items():
- assert offset.apply(dt) == expected
-
- def test_is_on_offset(self):
- tests = []
-
- tests.append(
- (
- CustomBusinessHour(start="10:00", end="15:00", holidays=self.holidays),
- {
- datetime(2014, 7, 1, 9): False,
- datetime(2014, 7, 1, 10): True,
- datetime(2014, 7, 1, 15): True,
- datetime(2014, 7, 1, 15, 1): False,
- datetime(2014, 7, 5, 12): False,
- datetime(2014, 7, 6, 12): False,
- },
- )
- )
-
- for offset, cases in tests:
- for dt, expected in cases.items():
- assert offset.is_on_offset(dt) == expected
-
- apply_cases = []
- apply_cases.append(
- (
- CustomBusinessHour(holidays=holidays),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 1, 19): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 9),
- datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 3, 9, 30, 15),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 10),
- # out of business hours
- datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
- # saturday
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
- datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- CustomBusinessHour(4, holidays=holidays),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 3, 9),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 3, 11),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 12),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
- datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30),
- },
- )
- )
-
- @pytest.mark.parametrize("apply_case", apply_cases)
- def test_apply(self, apply_case):
- offset, cases = apply_case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- nano_cases = []
- nano_cases.append(
- (
- CustomBusinessHour(holidays=holidays),
- {
- Timestamp("2014-07-01 15:00")
- + Nano(5): Timestamp("2014-07-01 16:00")
- + Nano(5),
- Timestamp("2014-07-01 16:00")
- + Nano(5): Timestamp("2014-07-03 09:00")
- + Nano(5),
- Timestamp("2014-07-01 16:00")
- - Nano(5): Timestamp("2014-07-01 17:00")
- - Nano(5),
- },
- )
- )
-
- nano_cases.append(
- (
- CustomBusinessHour(-1, holidays=holidays),
- {
- Timestamp("2014-07-01 15:00")
- + Nano(5): Timestamp("2014-07-01 14:00")
- + Nano(5),
- Timestamp("2014-07-01 10:00")
- + Nano(5): Timestamp("2014-07-01 09:00")
- + Nano(5),
- Timestamp("2014-07-01 10:00")
- - Nano(5): Timestamp("2014-06-26 17:00")
- - Nano(5),
- },
- )
- )
-
- @pytest.mark.parametrize("nano_case", nano_cases)
- def test_apply_nanoseconds(self, nano_case):
- offset, cases = nano_case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
-
-class TestCustomBusinessDay(Base):
- _offset = CDay
-
- def setup_method(self, method):
- self.d = datetime(2008, 1, 1)
- self.nd = np_datetime64_compat("2008-01-01 00:00:00Z")
-
- self.offset = CDay()
- self.offset1 = self.offset
- self.offset2 = CDay(2)
-
- def test_different_normalize_equals(self):
- # GH#21404 changed __eq__ to return False when `normalize` does not match
- offset = self._offset()
- offset2 = self._offset(normalize=True)
- assert offset != offset2
-
- def test_repr(self):
- assert repr(self.offset) == "<CustomBusinessDay>"
- assert repr(self.offset2) == "<2 * CustomBusinessDays>"
-
- expected = "<BusinessDay: offset=datetime.timedelta(days=1)>"
- assert repr(self.offset + timedelta(1)) == expected
-
- def test_with_offset(self):
- offset = self.offset + timedelta(hours=2)
-
- assert (self.d + offset) == datetime(2008, 1, 2, 2)
-
- def test_with_offset_index(self):
- dti = DatetimeIndex([self.d])
- result = dti + (self.offset + timedelta(hours=2))
-
- expected = DatetimeIndex([datetime(2008, 1, 2, 2)])
- tm.assert_index_equal(result, expected)
-
- def test_eq(self):
- assert self.offset2 == self.offset2
-
- def test_mul(self):
- pass
-
- def test_hash(self):
- assert hash(self.offset2) == hash(self.offset2)
-
- def test_call(self):
- with tm.assert_produces_warning(FutureWarning):
- # GH#34171 DateOffset.__call__ is deprecated
- assert self.offset2(self.d) == datetime(2008, 1, 3)
- assert self.offset2(self.nd) == datetime(2008, 1, 3)
-
- def testRollback1(self):
- assert CDay(10).rollback(self.d) == self.d
-
- def testRollback2(self):
- assert CDay(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)
-
- def testRollforward1(self):
- assert CDay(10).rollforward(self.d) == self.d
-
- def testRollforward2(self):
- assert CDay(10).rollforward(datetime(2008, 1, 5)) == datetime(2008, 1, 7)
-
- def test_roll_date_object(self):
- offset = CDay()
-
- dt = date(2012, 9, 15)
-
- result = offset.rollback(dt)
- assert result == datetime(2012, 9, 14)
-
- result = offset.rollforward(dt)
- assert result == datetime(2012, 9, 17)
-
- offset = offsets.Day()
- result = offset.rollback(dt)
- assert result == datetime(2012, 9, 15)
-
- result = offset.rollforward(dt)
- assert result == datetime(2012, 9, 15)
-
- on_offset_cases = [
- (CDay(), datetime(2008, 1, 1), True),
- (CDay(), datetime(2008, 1, 5), False),
- ]
-
- @pytest.mark.parametrize("case", on_offset_cases)
- def test_is_on_offset(self, case):
- offset, d, expected = case
- assert_is_on_offset(offset, d, expected)
-
- apply_cases: _ApplyCases = []
- apply_cases.append(
- (
- CDay(),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 2),
- datetime(2008, 1, 4): datetime(2008, 1, 7),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 8),
- },
- )
- )
-
- apply_cases.append(
- (
- 2 * CDay(),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 3),
- datetime(2008, 1, 4): datetime(2008, 1, 8),
- datetime(2008, 1, 5): datetime(2008, 1, 8),
- datetime(2008, 1, 6): datetime(2008, 1, 8),
- datetime(2008, 1, 7): datetime(2008, 1, 9),
- },
- )
- )
-
- apply_cases.append(
- (
- -CDay(),
- {
- datetime(2008, 1, 1): datetime(2007, 12, 31),
- datetime(2008, 1, 4): datetime(2008, 1, 3),
- datetime(2008, 1, 5): datetime(2008, 1, 4),
- datetime(2008, 1, 6): datetime(2008, 1, 4),
- datetime(2008, 1, 7): datetime(2008, 1, 4),
- datetime(2008, 1, 8): datetime(2008, 1, 7),
- },
- )
- )
-
- apply_cases.append(
- (
- -2 * CDay(),
- {
- datetime(2008, 1, 1): datetime(2007, 12, 28),
- datetime(2008, 1, 4): datetime(2008, 1, 2),
- datetime(2008, 1, 5): datetime(2008, 1, 3),
- datetime(2008, 1, 6): datetime(2008, 1, 3),
- datetime(2008, 1, 7): datetime(2008, 1, 3),
- datetime(2008, 1, 8): datetime(2008, 1, 4),
- datetime(2008, 1, 9): datetime(2008, 1, 7),
- },
- )
- )
-
- apply_cases.append(
- (
- CDay(0),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 1, 4): datetime(2008, 1, 4),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 7),
- },
- )
- )
-
- @pytest.mark.parametrize("case", apply_cases)
- def test_apply(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- def test_apply_large_n(self):
- dt = datetime(2012, 10, 23)
-
- result = dt + CDay(10)
- assert result == datetime(2012, 11, 6)
-
- result = dt + CDay(100) - CDay(100)
- assert result == dt
-
- off = CDay() * 6
- rs = datetime(2012, 1, 1) - off
- xp = datetime(2011, 12, 23)
- assert rs == xp
-
- st = datetime(2011, 12, 18)
- rs = st + off
- xp = datetime(2011, 12, 26)
- assert rs == xp
-
- def test_apply_corner(self):
- msg = (
- "Only know how to combine trading day "
- "with datetime, datetime64 or timedelta"
- )
- with pytest.raises(ApplyTypeError, match=msg):
- CDay().apply(BMonthEnd())
-
- def test_holidays(self):
- # Define a TradingDay offset
- holidays = ["2012-05-01", datetime(2013, 5, 1), np.datetime64("2014-05-01")]
- tday = CDay(holidays=holidays)
- for year in range(2012, 2015):
- dt = datetime(year, 4, 30)
- xp = datetime(year, 5, 2)
- rs = dt + tday
- assert rs == xp
-
- def test_weekmask(self):
- weekmask_saudi = "Sat Sun Mon Tue Wed" # Thu-Fri Weekend
- weekmask_uae = "1111001" # Fri-Sat Weekend
- weekmask_egypt = [1, 1, 1, 1, 0, 0, 1] # Fri-Sat Weekend
- bday_saudi = CDay(weekmask=weekmask_saudi)
- bday_uae = CDay(weekmask=weekmask_uae)
- bday_egypt = CDay(weekmask=weekmask_egypt)
- dt = datetime(2013, 5, 1)
- xp_saudi = datetime(2013, 5, 4)
- xp_uae = datetime(2013, 5, 2)
- xp_egypt = datetime(2013, 5, 2)
- assert xp_saudi == dt + bday_saudi
- assert xp_uae == dt + bday_uae
- assert xp_egypt == dt + bday_egypt
- xp2 = datetime(2013, 5, 5)
- assert xp2 == dt + 2 * bday_saudi
- assert xp2 == dt + 2 * bday_uae
- assert xp2 == dt + 2 * bday_egypt
-
- def test_weekmask_and_holidays(self):
- weekmask_egypt = "Sun Mon Tue Wed Thu" # Fri-Sat Weekend
- holidays = ["2012-05-01", datetime(2013, 5, 1), np.datetime64("2014-05-01")]
- bday_egypt = CDay(holidays=holidays, weekmask=weekmask_egypt)
- dt = datetime(2013, 4, 30)
- xp_egypt = datetime(2013, 5, 5)
- assert xp_egypt == dt + 2 * bday_egypt
-
- @pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
- def test_calendar(self):
- calendar = USFederalHolidayCalendar()
- dt = datetime(2014, 1, 17)
- assert_offset_equal(CDay(calendar=calendar), dt, datetime(2014, 1, 21))
-
- def test_roundtrip_pickle(self):
- def _check_roundtrip(obj):
- unpickled = tm.round_trip_pickle(obj)
- assert unpickled == obj
-
- _check_roundtrip(self.offset)
- _check_roundtrip(self.offset2)
- _check_roundtrip(self.offset * 2)
-
- def test_pickle_compat_0_14_1(self, datapath):
- hdays = [datetime(2013, 1, 1) for ele in range(4)]
- pth = datapath("tseries", "offsets", "data", "cday-0.14.1.pickle")
- cday0_14_1 = read_pickle(pth)
- cday = CDay(holidays=hdays)
- assert cday == cday0_14_1
-
-
-class CustomBusinessMonthBase:
- def setup_method(self, method):
- self.d = datetime(2008, 1, 1)
-
- self.offset = self._offset()
- self.offset1 = self.offset
- self.offset2 = self._offset(2)
-
- def test_eq(self):
- assert self.offset2 == self.offset2
-
- def test_mul(self):
- pass
-
- def test_hash(self):
- assert hash(self.offset2) == hash(self.offset2)
-
- def test_roundtrip_pickle(self):
- def _check_roundtrip(obj):
- unpickled = tm.round_trip_pickle(obj)
- assert unpickled == obj
-
- _check_roundtrip(self._offset())
- _check_roundtrip(self._offset(2))
- _check_roundtrip(self._offset() * 2)
-
- def test_copy(self):
- # GH 17452
- off = self._offset(weekmask="Mon Wed Fri")
- assert off == off.copy()
-
-
-class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
- _offset = CBMonthEnd
-
- def test_different_normalize_equals(self):
- # GH#21404 changed __eq__ to return False when `normalize` does not match
- offset = self._offset()
- offset2 = self._offset(normalize=True)
- assert offset != offset2
-
- def test_repr(self):
- assert repr(self.offset) == "<CustomBusinessMonthEnd>"
- assert repr(self.offset2) == "<2 * CustomBusinessMonthEnds>"
-
- def test_call(self):
- with tm.assert_produces_warning(FutureWarning):
- # GH#34171 DateOffset.__call__ is deprecated
- assert self.offset2(self.d) == datetime(2008, 2, 29)
-
- def testRollback1(self):
- assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
-
- def testRollback2(self):
- assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
-
- def testRollforward1(self):
- assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
-
- def test_roll_date_object(self):
- offset = CBMonthEnd()
-
- dt = date(2012, 9, 15)
-
- result = offset.rollback(dt)
- assert result == datetime(2012, 8, 31)
-
- result = offset.rollforward(dt)
- assert result == datetime(2012, 9, 28)
-
- offset = offsets.Day()
- result = offset.rollback(dt)
- assert result == datetime(2012, 9, 15)
-
- result = offset.rollforward(dt)
- assert result == datetime(2012, 9, 15)
-
- on_offset_cases = [
- (CBMonthEnd(), datetime(2008, 1, 31), True),
- (CBMonthEnd(), datetime(2008, 1, 1), False),
- ]
-
- @pytest.mark.parametrize("case", on_offset_cases)
- def test_is_on_offset(self, case):
- offset, d, expected = case
- assert_is_on_offset(offset, d, expected)
-
- apply_cases: _ApplyCases = []
- apply_cases.append(
- (
- CBMonthEnd(),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 31),
- datetime(2008, 2, 7): datetime(2008, 2, 29),
- },
- )
- )
-
- apply_cases.append(
- (
- 2 * CBMonthEnd(),
- {
- datetime(2008, 1, 1): datetime(2008, 2, 29),
- datetime(2008, 2, 7): datetime(2008, 3, 31),
- },
- )
- )
-
- apply_cases.append(
- (
- -CBMonthEnd(),
- {
- datetime(2008, 1, 1): datetime(2007, 12, 31),
- datetime(2008, 2, 8): datetime(2008, 1, 31),
- },
- )
- )
-
- apply_cases.append(
- (
- -2 * CBMonthEnd(),
- {
- datetime(2008, 1, 1): datetime(2007, 11, 30),
- datetime(2008, 2, 9): datetime(2007, 12, 31),
- },
- )
- )
-
- apply_cases.append(
- (
- CBMonthEnd(0),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 31),
- datetime(2008, 2, 7): datetime(2008, 2, 29),
- },
- )
- )
-
- @pytest.mark.parametrize("case", apply_cases)
- def test_apply(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- def test_apply_large_n(self):
- dt = datetime(2012, 10, 23)
-
- result = dt + CBMonthEnd(10)
- assert result == datetime(2013, 7, 31)
-
- result = dt + CDay(100) - CDay(100)
- assert result == dt
-
- off = CBMonthEnd() * 6
- rs = datetime(2012, 1, 1) - off
- xp = datetime(2011, 7, 29)
- assert rs == xp
-
- st = datetime(2011, 12, 18)
- rs = st + off
- xp = datetime(2012, 5, 31)
- assert rs == xp
-
- def test_holidays(self):
- # Define a TradingDay offset
- holidays = ["2012-01-31", datetime(2012, 2, 28), np.datetime64("2012-02-29")]
- bm_offset = CBMonthEnd(holidays=holidays)
- dt = datetime(2012, 1, 1)
- assert dt + bm_offset == datetime(2012, 1, 30)
- assert dt + 2 * bm_offset == datetime(2012, 2, 27)
-
- @pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
- def test_datetimeindex(self):
- from pandas.tseries.holiday import USFederalHolidayCalendar
-
- hcal = USFederalHolidayCalendar()
- freq = CBMonthEnd(calendar=hcal)
-
- assert date_range(start="20120101", end="20130101", freq=freq).tolist()[
- 0
- ] == datetime(2012, 1, 31)
-
-
-class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
- _offset = CBMonthBegin
-
- def test_different_normalize_equals(self):
- # GH#21404 changed __eq__ to return False when `normalize` does not match
- offset = self._offset()
- offset2 = self._offset(normalize=True)
- assert offset != offset2
-
- def test_repr(self):
- assert repr(self.offset) == "<CustomBusinessMonthBegin>"
- assert repr(self.offset2) == "<2 * CustomBusinessMonthBegins>"
-
- def test_call(self):
- with tm.assert_produces_warning(FutureWarning):
- # GH#34171 DateOffset.__call__ is deprecated
- assert self.offset2(self.d) == datetime(2008, 3, 3)
-
- def testRollback1(self):
- assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
-
- def testRollback2(self):
- assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)
-
- def testRollforward1(self):
- assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)
-
- def test_roll_date_object(self):
- offset = CBMonthBegin()
-
- dt = date(2012, 9, 15)
-
- result = offset.rollback(dt)
- assert result == datetime(2012, 9, 3)
-
- result = offset.rollforward(dt)
- assert result == datetime(2012, 10, 1)
-
- offset = offsets.Day()
- result = offset.rollback(dt)
- assert result == datetime(2012, 9, 15)
-
- result = offset.rollforward(dt)
- assert result == datetime(2012, 9, 15)
-
- on_offset_cases = [
- (CBMonthBegin(), datetime(2008, 1, 1), True),
- (CBMonthBegin(), datetime(2008, 1, 31), False),
- ]
-
- @pytest.mark.parametrize("case", on_offset_cases)
- def test_is_on_offset(self, case):
- offset, dt, expected = case
- assert_is_on_offset(offset, dt, expected)
-
- apply_cases: _ApplyCases = []
- apply_cases.append(
- (
- CBMonthBegin(),
- {
- datetime(2008, 1, 1): datetime(2008, 2, 1),
- datetime(2008, 2, 7): datetime(2008, 3, 3),
- },
- )
- )
-
- apply_cases.append(
- (
- 2 * CBMonthBegin(),
- {
- datetime(2008, 1, 1): datetime(2008, 3, 3),
- datetime(2008, 2, 7): datetime(2008, 4, 1),
- },
- )
- )
-
- apply_cases.append(
- (
- -CBMonthBegin(),
- {
- datetime(2008, 1, 1): datetime(2007, 12, 3),
- datetime(2008, 2, 8): datetime(2008, 2, 1),
- },
- )
- )
-
- apply_cases.append(
- (
- -2 * CBMonthBegin(),
- {
- datetime(2008, 1, 1): datetime(2007, 11, 1),
- datetime(2008, 2, 9): datetime(2008, 1, 1),
- },
- )
- )
-
- apply_cases.append(
- (
- CBMonthBegin(0),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 1, 7): datetime(2008, 2, 1),
- },
- )
- )
-
- @pytest.mark.parametrize("case", apply_cases)
- def test_apply(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- def test_apply_large_n(self):
- dt = datetime(2012, 10, 23)
-
- result = dt + CBMonthBegin(10)
- assert result == datetime(2013, 8, 1)
-
- result = dt + CDay(100) - CDay(100)
- assert result == dt
-
- off = CBMonthBegin() * 6
- rs = datetime(2012, 1, 1) - off
- xp = datetime(2011, 7, 1)
- assert rs == xp
-
- st = datetime(2011, 12, 18)
- rs = st + off
-
- xp = datetime(2012, 6, 1)
- assert rs == xp
-
- def test_holidays(self):
- # Define a TradingDay offset
- holidays = ["2012-02-01", datetime(2012, 2, 2), np.datetime64("2012-03-01")]
- bm_offset = CBMonthBegin(holidays=holidays)
- dt = datetime(2012, 1, 1)
-
- assert dt + bm_offset == datetime(2012, 1, 2)
- assert dt + 2 * bm_offset == datetime(2012, 2, 3)
-
- @pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
- def test_datetimeindex(self):
- hcal = USFederalHolidayCalendar()
- cbmb = CBMonthBegin(calendar=hcal)
- assert date_range(start="20120101", end="20130101", freq=cbmb).tolist()[
- 0
- ] == datetime(2012, 1, 3)
-
-
-class TestWeek(Base):
- _offset = Week
- d = Timestamp(datetime(2008, 1, 2))
- offset1 = _offset()
- offset2 = _offset(2)
-
- def test_repr(self):
- assert repr(Week(weekday=0)) == "<Week: weekday=0>"
- assert repr(Week(n=-1, weekday=0)) == "<-1 * Week: weekday=0>"
- assert repr(Week(n=-2, weekday=0)) == "<-2 * Weeks: weekday=0>"
-
- def test_corner(self):
- with pytest.raises(ValueError, match="Day must be"):
- Week(weekday=7)
-
- with pytest.raises(ValueError, match="Day must be"):
- Week(weekday=-1)
-
- def test_is_anchored(self):
- assert Week(weekday=0).is_anchored()
- assert not Week().is_anchored()
- assert not Week(2, weekday=2).is_anchored()
- assert not Week(2).is_anchored()
-
- offset_cases = []
- # not business week
- offset_cases.append(
- (
- Week(),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 8),
- datetime(2008, 1, 4): datetime(2008, 1, 11),
- datetime(2008, 1, 5): datetime(2008, 1, 12),
- datetime(2008, 1, 6): datetime(2008, 1, 13),
- datetime(2008, 1, 7): datetime(2008, 1, 14),
- },
- )
- )
-
- # Mon
- offset_cases.append(
- (
- Week(weekday=0),
- {
- datetime(2007, 12, 31): datetime(2008, 1, 7),
- datetime(2008, 1, 4): datetime(2008, 1, 7),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 14),
- },
- )
- )
-
- # n=0 -> roll forward. Mon
- offset_cases.append(
- (
- Week(0, weekday=0),
- {
- datetime(2007, 12, 31): datetime(2007, 12, 31),
- datetime(2008, 1, 4): datetime(2008, 1, 7),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 7),
- },
- )
- )
-
- # n=0 -> roll forward. Mon
- offset_cases.append(
- (
- Week(-2, weekday=1),
- {
- datetime(2010, 4, 6): datetime(2010, 3, 23),
- datetime(2010, 4, 8): datetime(2010, 3, 30),
- datetime(2010, 4, 5): datetime(2010, 3, 23),
- },
- )
- )
-
- @pytest.mark.parametrize("case", offset_cases)
- def test_offset(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- @pytest.mark.parametrize("weekday", range(7))
- def test_is_on_offset(self, weekday):
- offset = Week(weekday=weekday)
-
- for day in range(1, 8):
- date = datetime(2008, 1, day)
-
- if day % 7 == weekday:
- expected = True
- else:
- expected = False
- assert_is_on_offset(offset, date, expected)
-
-
-class TestWeekOfMonth(Base):
- _offset = WeekOfMonth
- offset1 = _offset()
- offset2 = _offset(2)
-
- def test_constructor(self):
- with pytest.raises(ValueError, match="^Week"):
- WeekOfMonth(n=1, week=4, weekday=0)
-
- with pytest.raises(ValueError, match="^Week"):
- WeekOfMonth(n=1, week=-1, weekday=0)
-
- with pytest.raises(ValueError, match="^Day"):
- WeekOfMonth(n=1, week=0, weekday=-1)
-
- with pytest.raises(ValueError, match="^Day"):
- WeekOfMonth(n=1, week=0, weekday=-7)
-
- def test_repr(self):
- assert (
- repr(WeekOfMonth(weekday=1, week=2)) == "<WeekOfMonth: week=2, weekday=1>"
- )
-
- def test_offset(self):
- date1 = datetime(2011, 1, 4) # 1st Tuesday of Month
- date2 = datetime(2011, 1, 11) # 2nd Tuesday of Month
- date3 = datetime(2011, 1, 18) # 3rd Tuesday of Month
- date4 = datetime(2011, 1, 25) # 4th Tuesday of Month
-
- # see for loop for structure
- test_cases = [
- (-2, 2, 1, date1, datetime(2010, 11, 16)),
- (-2, 2, 1, date2, datetime(2010, 11, 16)),
- (-2, 2, 1, date3, datetime(2010, 11, 16)),
- (-2, 2, 1, date4, datetime(2010, 12, 21)),
- (-1, 2, 1, date1, datetime(2010, 12, 21)),
- (-1, 2, 1, date2, datetime(2010, 12, 21)),
- (-1, 2, 1, date3, datetime(2010, 12, 21)),
- (-1, 2, 1, date4, datetime(2011, 1, 18)),
- (0, 0, 1, date1, datetime(2011, 1, 4)),
- (0, 0, 1, date2, datetime(2011, 2, 1)),
- (0, 0, 1, date3, datetime(2011, 2, 1)),
- (0, 0, 1, date4, datetime(2011, 2, 1)),
- (0, 1, 1, date1, datetime(2011, 1, 11)),
- (0, 1, 1, date2, datetime(2011, 1, 11)),
- (0, 1, 1, date3, datetime(2011, 2, 8)),
- (0, 1, 1, date4, datetime(2011, 2, 8)),
- (0, 0, 1, date1, datetime(2011, 1, 4)),
- (0, 1, 1, date2, datetime(2011, 1, 11)),
- (0, 2, 1, date3, datetime(2011, 1, 18)),
- (0, 3, 1, date4, datetime(2011, 1, 25)),
- (1, 0, 0, date1, datetime(2011, 2, 7)),
- (1, 0, 0, date2, datetime(2011, 2, 7)),
- (1, 0, 0, date3, datetime(2011, 2, 7)),
- (1, 0, 0, date4, datetime(2011, 2, 7)),
- (1, 0, 1, date1, datetime(2011, 2, 1)),
- (1, 0, 1, date2, datetime(2011, 2, 1)),
- (1, 0, 1, date3, datetime(2011, 2, 1)),
- (1, 0, 1, date4, datetime(2011, 2, 1)),
- (1, 0, 2, date1, datetime(2011, 1, 5)),
- (1, 0, 2, date2, datetime(2011, 2, 2)),
- (1, 0, 2, date3, datetime(2011, 2, 2)),
- (1, 0, 2, date4, datetime(2011, 2, 2)),
- (1, 2, 1, date1, datetime(2011, 1, 18)),
- (1, 2, 1, date2, datetime(2011, 1, 18)),
- (1, 2, 1, date3, datetime(2011, 2, 15)),
- (1, 2, 1, date4, datetime(2011, 2, 15)),
- (2, 2, 1, date1, datetime(2011, 2, 15)),
- (2, 2, 1, date2, datetime(2011, 2, 15)),
- (2, 2, 1, date3, datetime(2011, 3, 15)),
- (2, 2, 1, date4, datetime(2011, 3, 15)),
- ]
-
- for n, week, weekday, dt, expected in test_cases:
- offset = WeekOfMonth(n, week=week, weekday=weekday)
- assert_offset_equal(offset, dt, expected)
-
- # try subtracting
- result = datetime(2011, 2, 1) - WeekOfMonth(week=1, weekday=2)
- assert result == datetime(2011, 1, 12)
-
- result = datetime(2011, 2, 3) - WeekOfMonth(week=0, weekday=2)
- assert result == datetime(2011, 2, 2)
-
- on_offset_cases = [
- (0, 0, datetime(2011, 2, 7), True),
- (0, 0, datetime(2011, 2, 6), False),
- (0, 0, datetime(2011, 2, 14), False),
- (1, 0, datetime(2011, 2, 14), True),
- (0, 1, datetime(2011, 2, 1), True),
- (0, 1, datetime(2011, 2, 8), False),
- ]
-
- @pytest.mark.parametrize("case", on_offset_cases)
- def test_is_on_offset(self, case):
- week, weekday, dt, expected = case
- offset = WeekOfMonth(week=week, weekday=weekday)
- assert offset.is_on_offset(dt) == expected
-
-
-class TestLastWeekOfMonth(Base):
- _offset = LastWeekOfMonth
- offset1 = _offset()
- offset2 = _offset(2)
-
- def test_constructor(self):
- with pytest.raises(ValueError, match="^N cannot be 0"):
- LastWeekOfMonth(n=0, weekday=1)
-
- with pytest.raises(ValueError, match="^Day"):
- LastWeekOfMonth(n=1, weekday=-1)
-
- with pytest.raises(ValueError, match="^Day"):
- LastWeekOfMonth(n=1, weekday=7)
-
- def test_offset(self):
- # Saturday
- last_sat = datetime(2013, 8, 31)
- next_sat = datetime(2013, 9, 28)
- offset_sat = LastWeekOfMonth(n=1, weekday=5)
-
- one_day_before = last_sat + timedelta(days=-1)
- assert one_day_before + offset_sat == last_sat
-
- one_day_after = last_sat + timedelta(days=+1)
- assert one_day_after + offset_sat == next_sat
-
- # Test On that day
- assert last_sat + offset_sat == next_sat
-
- # Thursday
-
- offset_thur = LastWeekOfMonth(n=1, weekday=3)
- last_thurs = datetime(2013, 1, 31)
- next_thurs = datetime(2013, 2, 28)
-
- one_day_before = last_thurs + timedelta(days=-1)
- assert one_day_before + offset_thur == last_thurs
-
- one_day_after = last_thurs + timedelta(days=+1)
- assert one_day_after + offset_thur == next_thurs
-
- # Test on that day
- assert last_thurs + offset_thur == next_thurs
-
- three_before = last_thurs + timedelta(days=-3)
- assert three_before + offset_thur == last_thurs
-
- two_after = last_thurs + timedelta(days=+2)
- assert two_after + offset_thur == next_thurs
-
- offset_sunday = LastWeekOfMonth(n=1, weekday=WeekDay.SUN)
- assert datetime(2013, 7, 31) + offset_sunday == datetime(2013, 8, 25)
-
- on_offset_cases = [
- (WeekDay.SUN, datetime(2013, 1, 27), True),
- (WeekDay.SAT, datetime(2013, 3, 30), True),
- (WeekDay.MON, datetime(2013, 2, 18), False), # Not the last Mon
- (WeekDay.SUN, datetime(2013, 2, 25), False), # Not a SUN
- (WeekDay.MON, datetime(2013, 2, 25), True),
- (WeekDay.SAT, datetime(2013, 11, 30), True),
- (WeekDay.SAT, datetime(2006, 8, 26), True),
- (WeekDay.SAT, datetime(2007, 8, 25), True),
- (WeekDay.SAT, datetime(2008, 8, 30), True),
- (WeekDay.SAT, datetime(2009, 8, 29), True),
- (WeekDay.SAT, datetime(2010, 8, 28), True),
- (WeekDay.SAT, datetime(2011, 8, 27), True),
- (WeekDay.SAT, datetime(2019, 8, 31), True),
- ]
-
- @pytest.mark.parametrize("case", on_offset_cases)
- def test_is_on_offset(self, case):
- weekday, dt, expected = case
- offset = LastWeekOfMonth(weekday=weekday)
- assert offset.is_on_offset(dt) == expected
-
- def test_repr(self):
- assert (
- repr(LastWeekOfMonth(n=2, weekday=1)) == "<2 * LastWeekOfMonths: weekday=1>"
- )
-
-
-class TestSemiMonthEnd(Base):
- _offset = SemiMonthEnd
- offset1 = _offset()
- offset2 = _offset(2)
-
- def test_offset_whole_year(self):
- dates = (
- datetime(2007, 12, 31),
- datetime(2008, 1, 15),
- datetime(2008, 1, 31),
- datetime(2008, 2, 15),
- datetime(2008, 2, 29),
- datetime(2008, 3, 15),
- datetime(2008, 3, 31),
- datetime(2008, 4, 15),
- datetime(2008, 4, 30),
- datetime(2008, 5, 15),
- datetime(2008, 5, 31),
- datetime(2008, 6, 15),
- datetime(2008, 6, 30),
- datetime(2008, 7, 15),
- datetime(2008, 7, 31),
- datetime(2008, 8, 15),
- datetime(2008, 8, 31),
- datetime(2008, 9, 15),
- datetime(2008, 9, 30),
- datetime(2008, 10, 15),
- datetime(2008, 10, 31),
- datetime(2008, 11, 15),
- datetime(2008, 11, 30),
- datetime(2008, 12, 15),
- datetime(2008, 12, 31),
- )
-
- for base, exp_date in zip(dates[:-1], dates[1:]):
- assert_offset_equal(SemiMonthEnd(), base, exp_date)
-
- # ensure .apply_index works as expected
- s = DatetimeIndex(dates[:-1])
- with tm.assert_produces_warning(None):
- # GH#22535 check that we don't get a FutureWarning from adding
- # an integer array to PeriodIndex
- result = SemiMonthEnd() + s
-
- exp = DatetimeIndex(dates[1:])
- tm.assert_index_equal(result, exp)
-
- # ensure generating a range with DatetimeIndex gives same result
- result = date_range(start=dates[0], end=dates[-1], freq="SM")
- exp = DatetimeIndex(dates, freq="SM")
- tm.assert_index_equal(result, exp)
-
- offset_cases = []
- offset_cases.append(
- (
- SemiMonthEnd(),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 15),
- datetime(2008, 1, 15): datetime(2008, 1, 31),
- datetime(2008, 1, 31): datetime(2008, 2, 15),
- datetime(2006, 12, 14): datetime(2006, 12, 15),
- datetime(2006, 12, 29): datetime(2006, 12, 31),
- datetime(2006, 12, 31): datetime(2007, 1, 15),
- datetime(2007, 1, 1): datetime(2007, 1, 15),
- datetime(2006, 12, 1): datetime(2006, 12, 15),
- datetime(2006, 12, 15): datetime(2006, 12, 31),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthEnd(day_of_month=20),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 20),
- datetime(2008, 1, 15): datetime(2008, 1, 20),
- datetime(2008, 1, 21): datetime(2008, 1, 31),
- datetime(2008, 1, 31): datetime(2008, 2, 20),
- datetime(2006, 12, 14): datetime(2006, 12, 20),
- datetime(2006, 12, 29): datetime(2006, 12, 31),
- datetime(2006, 12, 31): datetime(2007, 1, 20),
- datetime(2007, 1, 1): datetime(2007, 1, 20),
- datetime(2006, 12, 1): datetime(2006, 12, 20),
- datetime(2006, 12, 15): datetime(2006, 12, 20),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthEnd(0),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 15),
- datetime(2008, 1, 16): datetime(2008, 1, 31),
- datetime(2008, 1, 15): datetime(2008, 1, 15),
- datetime(2008, 1, 31): datetime(2008, 1, 31),
- datetime(2006, 12, 29): datetime(2006, 12, 31),
- datetime(2006, 12, 31): datetime(2006, 12, 31),
- datetime(2007, 1, 1): datetime(2007, 1, 15),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthEnd(0, day_of_month=16),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 16),
- datetime(2008, 1, 16): datetime(2008, 1, 16),
- datetime(2008, 1, 15): datetime(2008, 1, 16),
- datetime(2008, 1, 31): datetime(2008, 1, 31),
- datetime(2006, 12, 29): datetime(2006, 12, 31),
- datetime(2006, 12, 31): datetime(2006, 12, 31),
- datetime(2007, 1, 1): datetime(2007, 1, 16),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthEnd(2),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 31),
- datetime(2008, 1, 31): datetime(2008, 2, 29),
- datetime(2006, 12, 29): datetime(2007, 1, 15),
- datetime(2006, 12, 31): datetime(2007, 1, 31),
- datetime(2007, 1, 1): datetime(2007, 1, 31),
- datetime(2007, 1, 16): datetime(2007, 2, 15),
- datetime(2006, 11, 1): datetime(2006, 11, 30),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthEnd(-1),
- {
- datetime(2007, 1, 1): datetime(2006, 12, 31),
- datetime(2008, 6, 30): datetime(2008, 6, 15),
- datetime(2008, 12, 31): datetime(2008, 12, 15),
- datetime(2006, 12, 29): datetime(2006, 12, 15),
- datetime(2006, 12, 30): datetime(2006, 12, 15),
- datetime(2007, 1, 1): datetime(2006, 12, 31),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthEnd(-1, day_of_month=4),
- {
- datetime(2007, 1, 1): datetime(2006, 12, 31),
- datetime(2007, 1, 4): datetime(2006, 12, 31),
- datetime(2008, 6, 30): datetime(2008, 6, 4),
- datetime(2008, 12, 31): datetime(2008, 12, 4),
- datetime(2006, 12, 5): datetime(2006, 12, 4),
- datetime(2006, 12, 30): datetime(2006, 12, 4),
- datetime(2007, 1, 1): datetime(2006, 12, 31),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthEnd(-2),
- {
- datetime(2007, 1, 1): datetime(2006, 12, 15),
- datetime(2008, 6, 30): datetime(2008, 5, 31),
- datetime(2008, 3, 15): datetime(2008, 2, 15),
- datetime(2008, 12, 31): datetime(2008, 11, 30),
- datetime(2006, 12, 29): datetime(2006, 11, 30),
- datetime(2006, 12, 14): datetime(2006, 11, 15),
- datetime(2007, 1, 1): datetime(2006, 12, 15),
- },
- )
- )
-
- @pytest.mark.parametrize("case", offset_cases)
- def test_offset(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- @pytest.mark.parametrize("case", offset_cases)
- def test_apply_index(self, case):
- # https://github.com/pandas-dev/pandas/issues/34580
- offset, cases = case
- s = DatetimeIndex(cases.keys())
- exp = DatetimeIndex(cases.values())
-
- with tm.assert_produces_warning(None):
- # GH#22535 check that we don't get a FutureWarning from adding
- # an integer array to PeriodIndex
- result = offset + s
- tm.assert_index_equal(result, exp)
-
- with tm.assert_produces_warning(FutureWarning):
- result = offset.apply_index(s)
- tm.assert_index_equal(result, exp)
-
- on_offset_cases = [
- (datetime(2007, 12, 31), True),
- (datetime(2007, 12, 15), True),
- (datetime(2007, 12, 14), False),
- (datetime(2007, 12, 1), False),
- (datetime(2008, 2, 29), True),
- ]
-
- @pytest.mark.parametrize("case", on_offset_cases)
- def test_is_on_offset(self, case):
- dt, expected = case
- assert_is_on_offset(SemiMonthEnd(), dt, expected)
-
- @pytest.mark.parametrize("klass", [Series, DatetimeIndex])
- def test_vectorized_offset_addition(self, klass):
- s = klass(
- [
- Timestamp("2000-01-15 00:15:00", tz="US/Central"),
- Timestamp("2000-02-15", tz="US/Central"),
- ],
- name="a",
- )
-
- with tm.assert_produces_warning(None):
- # GH#22535 check that we don't get a FutureWarning from adding
- # an integer array to PeriodIndex
- result = s + SemiMonthEnd()
- result2 = SemiMonthEnd() + s
-
- exp = klass(
- [
- Timestamp("2000-01-31 00:15:00", tz="US/Central"),
- Timestamp("2000-02-29", tz="US/Central"),
- ],
- name="a",
- )
- tm.assert_equal(result, exp)
- tm.assert_equal(result2, exp)
-
- s = klass(
- [
- Timestamp("2000-01-01 00:15:00", tz="US/Central"),
- Timestamp("2000-02-01", tz="US/Central"),
- ],
- name="a",
- )
-
- with tm.assert_produces_warning(None):
- # GH#22535 check that we don't get a FutureWarning from adding
- # an integer array to PeriodIndex
- result = s + SemiMonthEnd()
- result2 = SemiMonthEnd() + s
-
- exp = klass(
- [
- Timestamp("2000-01-15 00:15:00", tz="US/Central"),
- Timestamp("2000-02-15", tz="US/Central"),
- ],
- name="a",
- )
- tm.assert_equal(result, exp)
- tm.assert_equal(result2, exp)
-
-
-class TestSemiMonthBegin(Base):
- _offset = SemiMonthBegin
- offset1 = _offset()
- offset2 = _offset(2)
-
- def test_offset_whole_year(self):
- dates = (
- datetime(2007, 12, 15),
- datetime(2008, 1, 1),
- datetime(2008, 1, 15),
- datetime(2008, 2, 1),
- datetime(2008, 2, 15),
- datetime(2008, 3, 1),
- datetime(2008, 3, 15),
- datetime(2008, 4, 1),
- datetime(2008, 4, 15),
- datetime(2008, 5, 1),
- datetime(2008, 5, 15),
- datetime(2008, 6, 1),
- datetime(2008, 6, 15),
- datetime(2008, 7, 1),
- datetime(2008, 7, 15),
- datetime(2008, 8, 1),
- datetime(2008, 8, 15),
- datetime(2008, 9, 1),
- datetime(2008, 9, 15),
- datetime(2008, 10, 1),
- datetime(2008, 10, 15),
- datetime(2008, 11, 1),
- datetime(2008, 11, 15),
- datetime(2008, 12, 1),
- datetime(2008, 12, 15),
- )
-
- for base, exp_date in zip(dates[:-1], dates[1:]):
- assert_offset_equal(SemiMonthBegin(), base, exp_date)
-
- # ensure .apply_index works as expected
- s = DatetimeIndex(dates[:-1])
- with tm.assert_produces_warning(None):
- # GH#22535 check that we don't get a FutureWarning from adding
- # an integer array to PeriodIndex
- result = SemiMonthBegin() + s
-
- exp = DatetimeIndex(dates[1:])
- tm.assert_index_equal(result, exp)
-
- # ensure generating a range with DatetimeIndex gives same result
- result = date_range(start=dates[0], end=dates[-1], freq="SMS")
- exp = DatetimeIndex(dates, freq="SMS")
- tm.assert_index_equal(result, exp)
-
- offset_cases = []
- offset_cases.append(
- (
- SemiMonthBegin(),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 15),
- datetime(2008, 1, 15): datetime(2008, 2, 1),
- datetime(2008, 1, 31): datetime(2008, 2, 1),
- datetime(2006, 12, 14): datetime(2006, 12, 15),
- datetime(2006, 12, 29): datetime(2007, 1, 1),
- datetime(2006, 12, 31): datetime(2007, 1, 1),
- datetime(2007, 1, 1): datetime(2007, 1, 15),
- datetime(2006, 12, 1): datetime(2006, 12, 15),
- datetime(2006, 12, 15): datetime(2007, 1, 1),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthBegin(day_of_month=20),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 20),
- datetime(2008, 1, 15): datetime(2008, 1, 20),
- datetime(2008, 1, 21): datetime(2008, 2, 1),
- datetime(2008, 1, 31): datetime(2008, 2, 1),
- datetime(2006, 12, 14): datetime(2006, 12, 20),
- datetime(2006, 12, 29): datetime(2007, 1, 1),
- datetime(2006, 12, 31): datetime(2007, 1, 1),
- datetime(2007, 1, 1): datetime(2007, 1, 20),
- datetime(2006, 12, 1): datetime(2006, 12, 20),
- datetime(2006, 12, 15): datetime(2006, 12, 20),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthBegin(0),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 1, 16): datetime(2008, 2, 1),
- datetime(2008, 1, 15): datetime(2008, 1, 15),
- datetime(2008, 1, 31): datetime(2008, 2, 1),
- datetime(2006, 12, 29): datetime(2007, 1, 1),
- datetime(2006, 12, 2): datetime(2006, 12, 15),
- datetime(2007, 1, 1): datetime(2007, 1, 1),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthBegin(0, day_of_month=16),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 1, 16): datetime(2008, 1, 16),
- datetime(2008, 1, 15): datetime(2008, 1, 16),
- datetime(2008, 1, 31): datetime(2008, 2, 1),
- datetime(2006, 12, 29): datetime(2007, 1, 1),
- datetime(2006, 12, 31): datetime(2007, 1, 1),
- datetime(2007, 1, 5): datetime(2007, 1, 16),
- datetime(2007, 1, 1): datetime(2007, 1, 1),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthBegin(2),
- {
- datetime(2008, 1, 1): datetime(2008, 2, 1),
- datetime(2008, 1, 31): datetime(2008, 2, 15),
- datetime(2006, 12, 1): datetime(2007, 1, 1),
- datetime(2006, 12, 29): datetime(2007, 1, 15),
- datetime(2006, 12, 15): datetime(2007, 1, 15),
- datetime(2007, 1, 1): datetime(2007, 2, 1),
- datetime(2007, 1, 16): datetime(2007, 2, 15),
- datetime(2006, 11, 1): datetime(2006, 12, 1),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthBegin(-1),
- {
- datetime(2007, 1, 1): datetime(2006, 12, 15),
- datetime(2008, 6, 30): datetime(2008, 6, 15),
- datetime(2008, 6, 14): datetime(2008, 6, 1),
- datetime(2008, 12, 31): datetime(2008, 12, 15),
- datetime(2006, 12, 29): datetime(2006, 12, 15),
- datetime(2006, 12, 15): datetime(2006, 12, 1),
- datetime(2007, 1, 1): datetime(2006, 12, 15),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthBegin(-1, day_of_month=4),
- {
- datetime(2007, 1, 1): datetime(2006, 12, 4),
- datetime(2007, 1, 4): datetime(2007, 1, 1),
- datetime(2008, 6, 30): datetime(2008, 6, 4),
- datetime(2008, 12, 31): datetime(2008, 12, 4),
- datetime(2006, 12, 5): datetime(2006, 12, 4),
- datetime(2006, 12, 30): datetime(2006, 12, 4),
- datetime(2006, 12, 2): datetime(2006, 12, 1),
- datetime(2007, 1, 1): datetime(2006, 12, 4),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthBegin(-2),
- {
- datetime(2007, 1, 1): datetime(2006, 12, 1),
- datetime(2008, 6, 30): datetime(2008, 6, 1),
- datetime(2008, 6, 14): datetime(2008, 5, 15),
- datetime(2008, 12, 31): datetime(2008, 12, 1),
- datetime(2006, 12, 29): datetime(2006, 12, 1),
- datetime(2006, 12, 15): datetime(2006, 11, 15),
- datetime(2007, 1, 1): datetime(2006, 12, 1),
- },
- )
- )
-
- @pytest.mark.parametrize("case", offset_cases)
- def test_offset(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- @pytest.mark.parametrize("case", offset_cases)
- def test_apply_index(self, case):
- offset, cases = case
- s = DatetimeIndex(cases.keys())
-
- with tm.assert_produces_warning(None):
- # GH#22535 check that we don't get a FutureWarning from adding
- # an integer array to PeriodIndex
- result = offset + s
-
- exp = DatetimeIndex(cases.values())
- tm.assert_index_equal(result, exp)
-
- on_offset_cases = [
- (datetime(2007, 12, 1), True),
- (datetime(2007, 12, 15), True),
- (datetime(2007, 12, 14), False),
- (datetime(2007, 12, 31), False),
- (datetime(2008, 2, 15), True),
- ]
-
- @pytest.mark.parametrize("case", on_offset_cases)
- def test_is_on_offset(self, case):
- dt, expected = case
- assert_is_on_offset(SemiMonthBegin(), dt, expected)
-
- @pytest.mark.parametrize("klass", [Series, DatetimeIndex])
- def test_vectorized_offset_addition(self, klass):
- s = klass(
- [
- Timestamp("2000-01-15 00:15:00", tz="US/Central"),
- Timestamp("2000-02-15", tz="US/Central"),
- ],
- name="a",
- )
- with tm.assert_produces_warning(None):
- # GH#22535 check that we don't get a FutureWarning from adding
- # an integer array to PeriodIndex
- result = s + SemiMonthBegin()
- result2 = SemiMonthBegin() + s
-
- exp = klass(
- [
- Timestamp("2000-02-01 00:15:00", tz="US/Central"),
- Timestamp("2000-03-01", tz="US/Central"),
- ],
- name="a",
- )
- tm.assert_equal(result, exp)
- tm.assert_equal(result2, exp)
-
- s = klass(
- [
- Timestamp("2000-01-01 00:15:00", tz="US/Central"),
- Timestamp("2000-02-01", tz="US/Central"),
- ],
- name="a",
- )
- with tm.assert_produces_warning(None):
- # GH#22535 check that we don't get a FutureWarning from adding
- # an integer array to PeriodIndex
- result = s + SemiMonthBegin()
- result2 = SemiMonthBegin() + s
-
- exp = klass(
- [
- Timestamp("2000-01-15 00:15:00", tz="US/Central"),
- Timestamp("2000-02-15", tz="US/Central"),
- ],
- name="a",
- )
- tm.assert_equal(result, exp)
- tm.assert_equal(result2, exp)
-
-
def test_Easter():
assert_offset_equal(Easter(), datetime(2010, 1, 1), datetime(2010, 4, 4))
assert_offset_equal(Easter(), datetime(2010, 4, 5), datetime(2011, 4, 24))
@@ -4208,153 +707,6 @@ def get_utc_offset_hours(ts):
return (o.days * 24 * 3600 + o.seconds) / 3600.0
-class TestDST:
- """
- test DateOffset additions over Daylight Savings Time
- """
-
- # one microsecond before the DST transition
- ts_pre_fallback = "2013-11-03 01:59:59.999999"
- ts_pre_springfwd = "2013-03-10 01:59:59.999999"
-
- # test both basic names and dateutil timezones
- timezone_utc_offsets = {
- "US/Eastern": {"utc_offset_daylight": -4, "utc_offset_standard": -5},
- "dateutil/US/Pacific": {"utc_offset_daylight": -7, "utc_offset_standard": -8},
- }
- valid_date_offsets_singular = [
- "weekday",
- "day",
- "hour",
- "minute",
- "second",
- "microsecond",
- ]
- valid_date_offsets_plural = [
- "weeks",
- "days",
- "hours",
- "minutes",
- "seconds",
- "milliseconds",
- "microseconds",
- ]
-
- def _test_all_offsets(self, n, **kwds):
- valid_offsets = (
- self.valid_date_offsets_plural
- if n > 1
- else self.valid_date_offsets_singular
- )
-
- for name in valid_offsets:
- self._test_offset(offset_name=name, offset_n=n, **kwds)
-
- def _test_offset(self, offset_name, offset_n, tstart, expected_utc_offset):
- offset = DateOffset(**{offset_name: offset_n})
-
- t = tstart + offset
- if expected_utc_offset is not None:
- assert get_utc_offset_hours(t) == expected_utc_offset
-
- if offset_name == "weeks":
- # dates should match
- assert t.date() == timedelta(days=7 * offset.kwds["weeks"]) + tstart.date()
- # expect the same day of week, hour of day, minute, second, ...
- assert (
- t.dayofweek == tstart.dayofweek
- and t.hour == tstart.hour
- and t.minute == tstart.minute
- and t.second == tstart.second
- )
- elif offset_name == "days":
- # dates should match
- assert timedelta(offset.kwds["days"]) + tstart.date() == t.date()
- # expect the same hour of day, minute, second, ...
- assert (
- t.hour == tstart.hour
- and t.minute == tstart.minute
- and t.second == tstart.second
- )
- elif offset_name in self.valid_date_offsets_singular:
- # expect the singular offset value to match between tstart and t
- datepart_offset = getattr(
- t, offset_name if offset_name != "weekday" else "dayofweek"
- )
- assert datepart_offset == offset.kwds[offset_name]
- else:
- # the offset should be the same as if it was done in UTC
- assert t == (tstart.tz_convert("UTC") + offset).tz_convert("US/Pacific")
-
- def _make_timestamp(self, string, hrs_offset, tz):
- if hrs_offset >= 0:
- offset_string = f"{hrs_offset:02d}00"
- else:
- offset_string = f"-{(hrs_offset * -1):02}00"
- return Timestamp(string + offset_string).tz_convert(tz)
-
- def test_springforward_plural(self):
- # test moving from standard to daylight savings
- for tz, utc_offsets in self.timezone_utc_offsets.items():
- hrs_pre = utc_offsets["utc_offset_standard"]
- hrs_post = utc_offsets["utc_offset_daylight"]
- self._test_all_offsets(
- n=3,
- tstart=self._make_timestamp(self.ts_pre_springfwd, hrs_pre, tz),
- expected_utc_offset=hrs_post,
- )
-
- def test_fallback_singular(self):
- # in the case of singular offsets, we don't necessarily know which utc
- # offset the new Timestamp will wind up in (the tz for 1 month may be
- # different from 1 second) so we don't specify an expected_utc_offset
- for tz, utc_offsets in self.timezone_utc_offsets.items():
- hrs_pre = utc_offsets["utc_offset_standard"]
- self._test_all_offsets(
- n=1,
- tstart=self._make_timestamp(self.ts_pre_fallback, hrs_pre, tz),
- expected_utc_offset=None,
- )
-
- def test_springforward_singular(self):
- for tz, utc_offsets in self.timezone_utc_offsets.items():
- hrs_pre = utc_offsets["utc_offset_standard"]
- self._test_all_offsets(
- n=1,
- tstart=self._make_timestamp(self.ts_pre_springfwd, hrs_pre, tz),
- expected_utc_offset=None,
- )
-
- offset_classes = {
- MonthBegin: ["11/2/2012", "12/1/2012"],
- MonthEnd: ["11/2/2012", "11/30/2012"],
- BMonthBegin: ["11/2/2012", "12/3/2012"],
- BMonthEnd: ["11/2/2012", "11/30/2012"],
- CBMonthBegin: ["11/2/2012", "12/3/2012"],
- CBMonthEnd: ["11/2/2012", "11/30/2012"],
- SemiMonthBegin: ["11/2/2012", "11/15/2012"],
- SemiMonthEnd: ["11/2/2012", "11/15/2012"],
- Week: ["11/2/2012", "11/9/2012"],
- YearBegin: ["11/2/2012", "1/1/2013"],
- YearEnd: ["11/2/2012", "12/31/2012"],
- BYearBegin: ["11/2/2012", "1/1/2013"],
- BYearEnd: ["11/2/2012", "12/31/2012"],
- QuarterBegin: ["11/2/2012", "12/1/2012"],
- QuarterEnd: ["11/2/2012", "12/31/2012"],
- BQuarterBegin: ["11/2/2012", "12/3/2012"],
- BQuarterEnd: ["11/2/2012", "12/31/2012"],
- Day: ["11/4/2012", "11/4/2012 23:00"],
- }.items()
-
- @pytest.mark.parametrize("tup", offset_classes)
- def test_all_offset_classes(self, tup):
- offset, test_values = tup
-
- first = Timestamp(test_values[0], tz="US/Eastern") + offset()
- second = Timestamp(test_values[1], tz="US/Eastern")
- assert first == second
-
-
# ---------------------------------------------------------------------
diff --git a/pandas/tests/tseries/offsets/test_opening_times.py b/pandas/tests/tseries/offsets/test_opening_times.py
new file mode 100644
index 0000000000000..107436e4b3343
--- /dev/null
+++ b/pandas/tests/tseries/offsets/test_opening_times.py
@@ -0,0 +1,456 @@
+"""
+Test offset.BusinessHour._next_opening_time and offset.BusinessHour._prev_opening_time
+"""
+from datetime import datetime
+
+import pytest
+
+from pandas._libs.tslibs.offsets import BusinessHour
+
+
+class TestOpeningTimes:
+ # opening time should be affected by sign of n, not by n's value and end
+ opening_time_cases = [
+ (
+ [
+ BusinessHour(),
+ BusinessHour(n=2),
+ BusinessHour(n=4),
+ BusinessHour(end="10:00"),
+ BusinessHour(n=2, end="4:00"),
+ BusinessHour(n=4, end="15:00"),
+ ],
+ {
+ datetime(2014, 7, 1, 11): (
+ datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 9),
+ ),
+ datetime(2014, 7, 1, 18): (
+ datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 9),
+ ),
+ datetime(2014, 7, 1, 23): (
+ datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 9),
+ ),
+ datetime(2014, 7, 2, 8): (
+ datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 9),
+ ),
+ # if timestamp is on opening time, next opening time is
+ # as it is
+ datetime(2014, 7, 2, 9): (
+ datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 2, 9),
+ ),
+ datetime(2014, 7, 2, 10): (
+ datetime(2014, 7, 3, 9),
+ datetime(2014, 7, 2, 9),
+ ),
+ # 2014-07-05 is saturday
+ datetime(2014, 7, 5, 10): (
+ datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 4, 9),
+ ),
+ datetime(2014, 7, 4, 10): (
+ datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 4, 9),
+ ),
+ datetime(2014, 7, 4, 23): (
+ datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 4, 9),
+ ),
+ datetime(2014, 7, 6, 10): (
+ datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 4, 9),
+ ),
+ datetime(2014, 7, 7, 5): (
+ datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 4, 9),
+ ),
+ datetime(2014, 7, 7, 9, 1): (
+ datetime(2014, 7, 8, 9),
+ datetime(2014, 7, 7, 9),
+ ),
+ },
+ ),
+ (
+ [
+ BusinessHour(start="11:15"),
+ BusinessHour(n=2, start="11:15"),
+ BusinessHour(n=3, start="11:15"),
+ BusinessHour(start="11:15", end="10:00"),
+ BusinessHour(n=2, start="11:15", end="4:00"),
+ BusinessHour(n=3, start="11:15", end="15:00"),
+ ],
+ {
+ datetime(2014, 7, 1, 11): (
+ datetime(2014, 7, 1, 11, 15),
+ datetime(2014, 6, 30, 11, 15),
+ ),
+ datetime(2014, 7, 1, 18): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 11, 15),
+ ),
+ datetime(2014, 7, 1, 23): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 11, 15),
+ ),
+ datetime(2014, 7, 2, 8): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 11, 15),
+ ),
+ datetime(2014, 7, 2, 9): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 11, 15),
+ ),
+ datetime(2014, 7, 2, 10): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 11, 15),
+ ),
+ datetime(2014, 7, 2, 11, 15): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 2, 11, 15),
+ ),
+ datetime(2014, 7, 2, 11, 15, 1): (
+ datetime(2014, 7, 3, 11, 15),
+ datetime(2014, 7, 2, 11, 15),
+ ),
+ datetime(2014, 7, 5, 10): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 11, 15),
+ ),
+ datetime(2014, 7, 4, 10): (
+ datetime(2014, 7, 4, 11, 15),
+ datetime(2014, 7, 3, 11, 15),
+ ),
+ datetime(2014, 7, 4, 23): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 11, 15),
+ ),
+ datetime(2014, 7, 6, 10): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 11, 15),
+ ),
+ datetime(2014, 7, 7, 5): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 11, 15),
+ ),
+ datetime(2014, 7, 7, 9, 1): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 11, 15),
+ ),
+ },
+ ),
+ (
+ [
+ BusinessHour(-1),
+ BusinessHour(n=-2),
+ BusinessHour(n=-4),
+ BusinessHour(n=-1, end="10:00"),
+ BusinessHour(n=-2, end="4:00"),
+ BusinessHour(n=-4, end="15:00"),
+ ],
+ {
+ datetime(2014, 7, 1, 11): (
+ datetime(2014, 7, 1, 9),
+ datetime(2014, 7, 2, 9),
+ ),
+ datetime(2014, 7, 1, 18): (
+ datetime(2014, 7, 1, 9),
+ datetime(2014, 7, 2, 9),
+ ),
+ datetime(2014, 7, 1, 23): (
+ datetime(2014, 7, 1, 9),
+ datetime(2014, 7, 2, 9),
+ ),
+ datetime(2014, 7, 2, 8): (
+ datetime(2014, 7, 1, 9),
+ datetime(2014, 7, 2, 9),
+ ),
+ datetime(2014, 7, 2, 9): (
+ datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 2, 9),
+ ),
+ datetime(2014, 7, 2, 10): (
+ datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 3, 9),
+ ),
+ datetime(2014, 7, 5, 10): (
+ datetime(2014, 7, 4, 9),
+ datetime(2014, 7, 7, 9),
+ ),
+ datetime(2014, 7, 4, 10): (
+ datetime(2014, 7, 4, 9),
+ datetime(2014, 7, 7, 9),
+ ),
+ datetime(2014, 7, 4, 23): (
+ datetime(2014, 7, 4, 9),
+ datetime(2014, 7, 7, 9),
+ ),
+ datetime(2014, 7, 6, 10): (
+ datetime(2014, 7, 4, 9),
+ datetime(2014, 7, 7, 9),
+ ),
+ datetime(2014, 7, 7, 5): (
+ datetime(2014, 7, 4, 9),
+ datetime(2014, 7, 7, 9),
+ ),
+ datetime(2014, 7, 7, 9): (
+ datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 7, 9),
+ ),
+ datetime(2014, 7, 7, 9, 1): (
+ datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 8, 9),
+ ),
+ },
+ ),
+ (
+ [
+ BusinessHour(start="17:00", end="05:00"),
+ BusinessHour(n=3, start="17:00", end="03:00"),
+ ],
+ {
+ datetime(2014, 7, 1, 11): (
+ datetime(2014, 7, 1, 17),
+ datetime(2014, 6, 30, 17),
+ ),
+ datetime(2014, 7, 1, 18): (
+ datetime(2014, 7, 2, 17),
+ datetime(2014, 7, 1, 17),
+ ),
+ datetime(2014, 7, 1, 23): (
+ datetime(2014, 7, 2, 17),
+ datetime(2014, 7, 1, 17),
+ ),
+ datetime(2014, 7, 2, 8): (
+ datetime(2014, 7, 2, 17),
+ datetime(2014, 7, 1, 17),
+ ),
+ datetime(2014, 7, 2, 9): (
+ datetime(2014, 7, 2, 17),
+ datetime(2014, 7, 1, 17),
+ ),
+ datetime(2014, 7, 4, 17): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 4, 17),
+ ),
+ datetime(2014, 7, 5, 10): (
+ datetime(2014, 7, 7, 17),
+ datetime(2014, 7, 4, 17),
+ ),
+ datetime(2014, 7, 4, 10): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 3, 17),
+ ),
+ datetime(2014, 7, 4, 23): (
+ datetime(2014, 7, 7, 17),
+ datetime(2014, 7, 4, 17),
+ ),
+ datetime(2014, 7, 6, 10): (
+ datetime(2014, 7, 7, 17),
+ datetime(2014, 7, 4, 17),
+ ),
+ datetime(2014, 7, 7, 5): (
+ datetime(2014, 7, 7, 17),
+ datetime(2014, 7, 4, 17),
+ ),
+ datetime(2014, 7, 7, 17, 1): (
+ datetime(2014, 7, 8, 17),
+ datetime(2014, 7, 7, 17),
+ ),
+ },
+ ),
+ (
+ [
+ BusinessHour(-1, start="17:00", end="05:00"),
+ BusinessHour(n=-2, start="17:00", end="03:00"),
+ ],
+ {
+ datetime(2014, 7, 1, 11): (
+ datetime(2014, 6, 30, 17),
+ datetime(2014, 7, 1, 17),
+ ),
+ datetime(2014, 7, 1, 18): (
+ datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 17),
+ ),
+ datetime(2014, 7, 1, 23): (
+ datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 17),
+ ),
+ datetime(2014, 7, 2, 8): (
+ datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 17),
+ ),
+ datetime(2014, 7, 2, 9): (
+ datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 17),
+ ),
+ datetime(2014, 7, 2, 16, 59): (
+ datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 17),
+ ),
+ datetime(2014, 7, 5, 10): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 17),
+ ),
+ datetime(2014, 7, 4, 10): (
+ datetime(2014, 7, 3, 17),
+ datetime(2014, 7, 4, 17),
+ ),
+ datetime(2014, 7, 4, 23): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 17),
+ ),
+ datetime(2014, 7, 6, 10): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 17),
+ ),
+ datetime(2014, 7, 7, 5): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 17),
+ ),
+ datetime(2014, 7, 7, 18): (
+ datetime(2014, 7, 7, 17),
+ datetime(2014, 7, 8, 17),
+ ),
+ },
+ ),
+ (
+ [
+ BusinessHour(start=["11:15", "15:00"], end=["13:00", "20:00"]),
+ BusinessHour(n=3, start=["11:15", "15:00"], end=["12:00", "20:00"]),
+ BusinessHour(start=["11:15", "15:00"], end=["13:00", "17:00"]),
+ BusinessHour(n=2, start=["11:15", "15:00"], end=["12:00", "03:00"]),
+ BusinessHour(n=3, start=["11:15", "15:00"], end=["13:00", "16:00"]),
+ ],
+ {
+ datetime(2014, 7, 1, 11): (
+ datetime(2014, 7, 1, 11, 15),
+ datetime(2014, 6, 30, 15),
+ ),
+ datetime(2014, 7, 1, 18): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 15),
+ ),
+ datetime(2014, 7, 1, 23): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 15),
+ ),
+ datetime(2014, 7, 2, 8): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 15),
+ ),
+ datetime(2014, 7, 2, 9): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 15),
+ ),
+ datetime(2014, 7, 2, 10): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 15),
+ ),
+ datetime(2014, 7, 2, 11, 15): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 2, 11, 15),
+ ),
+ datetime(2014, 7, 2, 11, 15, 1): (
+ datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 11, 15),
+ ),
+ datetime(2014, 7, 5, 10): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 15),
+ ),
+ datetime(2014, 7, 4, 10): (
+ datetime(2014, 7, 4, 11, 15),
+ datetime(2014, 7, 3, 15),
+ ),
+ datetime(2014, 7, 4, 23): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 15),
+ ),
+ datetime(2014, 7, 6, 10): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 15),
+ ),
+ datetime(2014, 7, 7, 5): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 15),
+ ),
+ datetime(2014, 7, 7, 9, 1): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 15),
+ ),
+ datetime(2014, 7, 7, 12): (
+ datetime(2014, 7, 7, 15),
+ datetime(2014, 7, 7, 11, 15),
+ ),
+ },
+ ),
+ (
+ [
+ BusinessHour(n=-1, start=["17:00", "08:00"], end=["05:00", "10:00"]),
+ BusinessHour(n=-2, start=["08:00", "17:00"], end=["10:00", "03:00"]),
+ ],
+ {
+ datetime(2014, 7, 1, 11): (
+ datetime(2014, 7, 1, 8),
+ datetime(2014, 7, 1, 17),
+ ),
+ datetime(2014, 7, 1, 18): (
+ datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 8),
+ ),
+ datetime(2014, 7, 1, 23): (
+ datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 8),
+ ),
+ datetime(2014, 7, 2, 8): (
+ datetime(2014, 7, 2, 8),
+ datetime(2014, 7, 2, 8),
+ ),
+ datetime(2014, 7, 2, 9): (
+ datetime(2014, 7, 2, 8),
+ datetime(2014, 7, 2, 17),
+ ),
+ datetime(2014, 7, 2, 16, 59): (
+ datetime(2014, 7, 2, 8),
+ datetime(2014, 7, 2, 17),
+ ),
+ datetime(2014, 7, 5, 10): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 8),
+ ),
+ datetime(2014, 7, 4, 10): (
+ datetime(2014, 7, 4, 8),
+ datetime(2014, 7, 4, 17),
+ ),
+ datetime(2014, 7, 4, 23): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 8),
+ ),
+ datetime(2014, 7, 6, 10): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 8),
+ ),
+ datetime(2014, 7, 7, 5): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 8),
+ ),
+ datetime(2014, 7, 7, 18): (
+ datetime(2014, 7, 7, 17),
+ datetime(2014, 7, 8, 8),
+ ),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("case", opening_time_cases)
+ def test_opening_time(self, case):
+ _offsets, cases = case
+ for offset in _offsets:
+ for dt, (exp_next, exp_prev) in cases.items():
+ assert offset._next_opening_time(dt) == exp_next
+ assert offset._prev_opening_time(dt) == exp_prev
diff --git a/pandas/tests/tseries/offsets/test_week.py b/pandas/tests/tseries/offsets/test_week.py
new file mode 100644
index 0000000000000..54751a70b151d
--- /dev/null
+++ b/pandas/tests/tseries/offsets/test_week.py
@@ -0,0 +1,297 @@
+"""
+Tests for offset.Week, offset.WeekofMonth and offset.LastWeekofMonth
+"""
+from datetime import datetime, timedelta
+
+import pytest
+
+from pandas._libs.tslibs import Timestamp
+from pandas._libs.tslibs.offsets import LastWeekOfMonth, Week, WeekOfMonth
+
+from pandas.tests.tseries.offsets.common import (
+ Base,
+ WeekDay,
+ assert_is_on_offset,
+ assert_offset_equal,
+)
+
+
+class TestWeek(Base):
+ _offset = Week
+ d = Timestamp(datetime(2008, 1, 2))
+ offset1 = _offset()
+ offset2 = _offset(2)
+
+ def test_repr(self):
+ assert repr(Week(weekday=0)) == "<Week: weekday=0>"
+ assert repr(Week(n=-1, weekday=0)) == "<-1 * Week: weekday=0>"
+ assert repr(Week(n=-2, weekday=0)) == "<-2 * Weeks: weekday=0>"
+
+ def test_corner(self):
+ with pytest.raises(ValueError, match="Day must be"):
+ Week(weekday=7)
+
+ with pytest.raises(ValueError, match="Day must be"):
+ Week(weekday=-1)
+
+ def test_is_anchored(self):
+ assert Week(weekday=0).is_anchored()
+ assert not Week().is_anchored()
+ assert not Week(2, weekday=2).is_anchored()
+ assert not Week(2).is_anchored()
+
+ offset_cases = []
+ # not business week
+ offset_cases.append(
+ (
+ Week(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 8),
+ datetime(2008, 1, 4): datetime(2008, 1, 11),
+ datetime(2008, 1, 5): datetime(2008, 1, 12),
+ datetime(2008, 1, 6): datetime(2008, 1, 13),
+ datetime(2008, 1, 7): datetime(2008, 1, 14),
+ },
+ )
+ )
+
+ # Mon
+ offset_cases.append(
+ (
+ Week(weekday=0),
+ {
+ datetime(2007, 12, 31): datetime(2008, 1, 7),
+ datetime(2008, 1, 4): datetime(2008, 1, 7),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 14),
+ },
+ )
+ )
+
+ # n=0 -> roll forward. Mon
+ offset_cases.append(
+ (
+ Week(0, weekday=0),
+ {
+ datetime(2007, 12, 31): datetime(2007, 12, 31),
+ datetime(2008, 1, 4): datetime(2008, 1, 7),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 7),
+ },
+ )
+ )
+
+ # n=0 -> roll forward. Mon
+ offset_cases.append(
+ (
+ Week(-2, weekday=1),
+ {
+ datetime(2010, 4, 6): datetime(2010, 3, 23),
+ datetime(2010, 4, 8): datetime(2010, 3, 30),
+ datetime(2010, 4, 5): datetime(2010, 3, 23),
+ },
+ )
+ )
+
+ @pytest.mark.parametrize("case", offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ @pytest.mark.parametrize("weekday", range(7))
+ def test_is_on_offset(self, weekday):
+ offset = Week(weekday=weekday)
+
+ for day in range(1, 8):
+ date = datetime(2008, 1, day)
+
+ if day % 7 == weekday:
+ expected = True
+ else:
+ expected = False
+ assert_is_on_offset(offset, date, expected)
+
+
+class TestWeekOfMonth(Base):
+ _offset = WeekOfMonth
+ offset1 = _offset()
+ offset2 = _offset(2)
+
+ def test_constructor(self):
+ with pytest.raises(ValueError, match="^Week"):
+ WeekOfMonth(n=1, week=4, weekday=0)
+
+ with pytest.raises(ValueError, match="^Week"):
+ WeekOfMonth(n=1, week=-1, weekday=0)
+
+ with pytest.raises(ValueError, match="^Day"):
+ WeekOfMonth(n=1, week=0, weekday=-1)
+
+ with pytest.raises(ValueError, match="^Day"):
+ WeekOfMonth(n=1, week=0, weekday=-7)
+
+ def test_repr(self):
+ assert (
+ repr(WeekOfMonth(weekday=1, week=2)) == "<WeekOfMonth: week=2, weekday=1>"
+ )
+
+ def test_offset(self):
+ date1 = datetime(2011, 1, 4) # 1st Tuesday of Month
+ date2 = datetime(2011, 1, 11) # 2nd Tuesday of Month
+ date3 = datetime(2011, 1, 18) # 3rd Tuesday of Month
+ date4 = datetime(2011, 1, 25) # 4th Tuesday of Month
+
+ # see for loop for structure
+ test_cases = [
+ (-2, 2, 1, date1, datetime(2010, 11, 16)),
+ (-2, 2, 1, date2, datetime(2010, 11, 16)),
+ (-2, 2, 1, date3, datetime(2010, 11, 16)),
+ (-2, 2, 1, date4, datetime(2010, 12, 21)),
+ (-1, 2, 1, date1, datetime(2010, 12, 21)),
+ (-1, 2, 1, date2, datetime(2010, 12, 21)),
+ (-1, 2, 1, date3, datetime(2010, 12, 21)),
+ (-1, 2, 1, date4, datetime(2011, 1, 18)),
+ (0, 0, 1, date1, datetime(2011, 1, 4)),
+ (0, 0, 1, date2, datetime(2011, 2, 1)),
+ (0, 0, 1, date3, datetime(2011, 2, 1)),
+ (0, 0, 1, date4, datetime(2011, 2, 1)),
+ (0, 1, 1, date1, datetime(2011, 1, 11)),
+ (0, 1, 1, date2, datetime(2011, 1, 11)),
+ (0, 1, 1, date3, datetime(2011, 2, 8)),
+ (0, 1, 1, date4, datetime(2011, 2, 8)),
+ (0, 0, 1, date1, datetime(2011, 1, 4)),
+ (0, 1, 1, date2, datetime(2011, 1, 11)),
+ (0, 2, 1, date3, datetime(2011, 1, 18)),
+ (0, 3, 1, date4, datetime(2011, 1, 25)),
+ (1, 0, 0, date1, datetime(2011, 2, 7)),
+ (1, 0, 0, date2, datetime(2011, 2, 7)),
+ (1, 0, 0, date3, datetime(2011, 2, 7)),
+ (1, 0, 0, date4, datetime(2011, 2, 7)),
+ (1, 0, 1, date1, datetime(2011, 2, 1)),
+ (1, 0, 1, date2, datetime(2011, 2, 1)),
+ (1, 0, 1, date3, datetime(2011, 2, 1)),
+ (1, 0, 1, date4, datetime(2011, 2, 1)),
+ (1, 0, 2, date1, datetime(2011, 1, 5)),
+ (1, 0, 2, date2, datetime(2011, 2, 2)),
+ (1, 0, 2, date3, datetime(2011, 2, 2)),
+ (1, 0, 2, date4, datetime(2011, 2, 2)),
+ (1, 2, 1, date1, datetime(2011, 1, 18)),
+ (1, 2, 1, date2, datetime(2011, 1, 18)),
+ (1, 2, 1, date3, datetime(2011, 2, 15)),
+ (1, 2, 1, date4, datetime(2011, 2, 15)),
+ (2, 2, 1, date1, datetime(2011, 2, 15)),
+ (2, 2, 1, date2, datetime(2011, 2, 15)),
+ (2, 2, 1, date3, datetime(2011, 3, 15)),
+ (2, 2, 1, date4, datetime(2011, 3, 15)),
+ ]
+
+ for n, week, weekday, dt, expected in test_cases:
+ offset = WeekOfMonth(n, week=week, weekday=weekday)
+ assert_offset_equal(offset, dt, expected)
+
+ # try subtracting
+ result = datetime(2011, 2, 1) - WeekOfMonth(week=1, weekday=2)
+ assert result == datetime(2011, 1, 12)
+
+ result = datetime(2011, 2, 3) - WeekOfMonth(week=0, weekday=2)
+ assert result == datetime(2011, 2, 2)
+
+ on_offset_cases = [
+ (0, 0, datetime(2011, 2, 7), True),
+ (0, 0, datetime(2011, 2, 6), False),
+ (0, 0, datetime(2011, 2, 14), False),
+ (1, 0, datetime(2011, 2, 14), True),
+ (0, 1, datetime(2011, 2, 1), True),
+ (0, 1, datetime(2011, 2, 8), False),
+ ]
+
+ @pytest.mark.parametrize("case", on_offset_cases)
+ def test_is_on_offset(self, case):
+ week, weekday, dt, expected = case
+ offset = WeekOfMonth(week=week, weekday=weekday)
+ assert offset.is_on_offset(dt) == expected
+
+
+class TestLastWeekOfMonth(Base):
+ _offset = LastWeekOfMonth
+ offset1 = _offset()
+ offset2 = _offset(2)
+
+ def test_constructor(self):
+ with pytest.raises(ValueError, match="^N cannot be 0"):
+ LastWeekOfMonth(n=0, weekday=1)
+
+ with pytest.raises(ValueError, match="^Day"):
+ LastWeekOfMonth(n=1, weekday=-1)
+
+ with pytest.raises(ValueError, match="^Day"):
+ LastWeekOfMonth(n=1, weekday=7)
+
+ def test_offset(self):
+ # Saturday
+ last_sat = datetime(2013, 8, 31)
+ next_sat = datetime(2013, 9, 28)
+ offset_sat = LastWeekOfMonth(n=1, weekday=5)
+
+ one_day_before = last_sat + timedelta(days=-1)
+ assert one_day_before + offset_sat == last_sat
+
+ one_day_after = last_sat + timedelta(days=+1)
+ assert one_day_after + offset_sat == next_sat
+
+ # Test On that day
+ assert last_sat + offset_sat == next_sat
+
+ # Thursday
+
+ offset_thur = LastWeekOfMonth(n=1, weekday=3)
+ last_thurs = datetime(2013, 1, 31)
+ next_thurs = datetime(2013, 2, 28)
+
+ one_day_before = last_thurs + timedelta(days=-1)
+ assert one_day_before + offset_thur == last_thurs
+
+ one_day_after = last_thurs + timedelta(days=+1)
+ assert one_day_after + offset_thur == next_thurs
+
+ # Test on that day
+ assert last_thurs + offset_thur == next_thurs
+
+ three_before = last_thurs + timedelta(days=-3)
+ assert three_before + offset_thur == last_thurs
+
+ two_after = last_thurs + timedelta(days=+2)
+ assert two_after + offset_thur == next_thurs
+
+ offset_sunday = LastWeekOfMonth(n=1, weekday=WeekDay.SUN)
+ assert datetime(2013, 7, 31) + offset_sunday == datetime(2013, 8, 25)
+
+ on_offset_cases = [
+ (WeekDay.SUN, datetime(2013, 1, 27), True),
+ (WeekDay.SAT, datetime(2013, 3, 30), True),
+ (WeekDay.MON, datetime(2013, 2, 18), False), # Not the last Mon
+ (WeekDay.SUN, datetime(2013, 2, 25), False), # Not a SUN
+ (WeekDay.MON, datetime(2013, 2, 25), True),
+ (WeekDay.SAT, datetime(2013, 11, 30), True),
+ (WeekDay.SAT, datetime(2006, 8, 26), True),
+ (WeekDay.SAT, datetime(2007, 8, 25), True),
+ (WeekDay.SAT, datetime(2008, 8, 30), True),
+ (WeekDay.SAT, datetime(2009, 8, 29), True),
+ (WeekDay.SAT, datetime(2010, 8, 28), True),
+ (WeekDay.SAT, datetime(2011, 8, 27), True),
+ (WeekDay.SAT, datetime(2019, 8, 31), True),
+ ]
+
+ @pytest.mark.parametrize("case", on_offset_cases)
+ def test_is_on_offset(self, case):
+ weekday, dt, expected = case
+ offset = LastWeekOfMonth(weekday=weekday)
+ assert offset.is_on_offset(dt) == expected
+
+ def test_repr(self):
+ assert (
+ repr(LastWeekOfMonth(n=2, weekday=1)) == "<2 * LastWeekOfMonths: weekday=1>"
+ )
diff --git a/pandas/tests/tseries/offsets/test_yqm_offsets.py b/pandas/tests/tseries/offsets/test_yqm_offsets.py
index 9921355bdf2ee..260f7368123a4 100644
--- a/pandas/tests/tseries/offsets/test_yqm_offsets.py
+++ b/pandas/tests/tseries/offsets/test_yqm_offsets.py
@@ -7,6 +7,11 @@
import pandas as pd
from pandas import Timestamp
+from pandas.tests.tseries.offsets.common import (
+ Base,
+ assert_is_on_offset,
+ assert_offset_equal,
+)
from pandas.tseries.offsets import (
BMonthBegin,
@@ -23,9 +28,6 @@
YearEnd,
)
-from .common import assert_is_on_offset, assert_offset_equal
-from .test_offsets import Base
-
# --------------------------------------------------------------------
# Misc
| This is to address xref #26807 specifically for pandas/tests/tseries/offsets/test_offsets.py . I tried to get all the new modules below about 1000 lines and break it up in logical ways.
Other than moving code, I have changed some list construction for pytest parameterizing to be a tiny bit more compact by changing a bunch of sequential `append`s to a single list literal. Other than that no changes.
- [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38924 | 2021-01-03T17:59:12Z | 2021-01-03T23:22:06Z | 2021-01-03T23:22:06Z | 2021-01-05T17:03:38Z |
TST: GH30999 Add placeholder messages to pandas/tests/io/test_sql.py and remove test for numexpr < 2.6.8 | diff --git a/pandas/tests/computation/test_compat.py b/pandas/tests/computation/test_compat.py
index 9fc3ed4800d09..8fa11ab75dd67 100644
--- a/pandas/tests/computation/test_compat.py
+++ b/pandas/tests/computation/test_compat.py
@@ -36,14 +36,10 @@ def testit():
if engine == "numexpr":
try:
- import numexpr as ne
+ import numexpr as ne # noqa F401
except ImportError:
pytest.skip("no numexpr")
else:
- if LooseVersion(ne.__version__) < LooseVersion(VERSIONS["numexpr"]):
- with pytest.raises(ImportError):
- testit()
- else:
- testit()
+ testit()
else:
testit()
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 52869f3f2fd42..6fb120faa6db2 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -2896,7 +2896,7 @@ def test_execute_fail(self):
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
- with pytest.raises(Exception):
+ with pytest.raises(Exception, match="<insert message here>"):
sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
def test_execute_closed_connection(self, request, datapath):
@@ -2917,7 +2917,7 @@ def test_execute_closed_connection(self, request, datapath):
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
- with pytest.raises(Exception):
+ with pytest.raises(Exception, match="<insert message here>"):
tquery("select * from test", con=self.conn)
# Initialize connection again (needed for tearDown)
| This is my attempt to finally finish off #30999
In pandas/tests/io/test_sql.py, there is a whole test class skipped. It looks like xref #20536 is supposed to address that, but no one has commented there since March 2018, so I don't think that's going to be fixed any time soon. I noticed that there were other tests in the same module with `match="<insert message here>"` so I decided to put it in the two tests that I can't figure out the correct error message for.
In pandas/tests/computation/test_compat.py there was an if statement that the numexpr library is at least 2.6.8. I tried to set up an environment with a lower version but conda couldn't resolve the dependencies. That test isn't running in the CI (xref #38876) and that test was last touched in a substantive way in 2016. I think that portion of the test is no longer required.
Not sure that I actually addressed these correctly, but I made an attempt so we could have a conversation about it in a more concrete way.
- [x] xref #30999
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38920 | 2021-01-03T12:22:31Z | 2021-01-03T17:24:50Z | 2021-01-03T17:24:50Z | 2021-01-05T17:01:47Z |
BUG: fix the bad error raised by HDFStore.put() | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 45f8344a1ebe0..5b58ba14a1eff 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -273,6 +273,7 @@ I/O
- Allow custom error values for parse_dates argument of :func:`read_sql`, :func:`read_sql_query` and :func:`read_sql_table` (:issue:`35185`)
- Bug in :func:`to_hdf` raising ``KeyError`` when trying to apply
for subclasses of ``DataFrame`` or ``Series`` (:issue:`33748`).
+- Bug in :meth:`~HDFStore.put` raising a wrong ``TypeError`` when saving a DataFrame with non-string dtype (:issue:`34274`)
- Bug in :func:`json_normalize` resulting in the first element of a generator object not being included in the returned ``DataFrame`` (:issue:`35923`)
- Bug in :func:`read_excel` forward filling :class:`MultiIndex` names with multiple header and index columns specified (:issue:`34673`)
- :func:`pandas.read_excel` now respects :func:``pandas.set_option`` (:issue:`34252`)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index ceaf6e1ac21e5..d2b02038f8b78 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -3915,6 +3915,7 @@ def _create_axes(
nan_rep=nan_rep,
encoding=self.encoding,
errors=self.errors,
+ block_columns=b_items,
)
adj_name = _maybe_adjust_name(new_name, self.version)
@@ -4878,7 +4879,14 @@ def _unconvert_index(
def _maybe_convert_for_string_atom(
- name: str, block: "Block", existing_col, min_itemsize, nan_rep, encoding, errors
+ name: str,
+ block: "Block",
+ existing_col,
+ min_itemsize,
+ nan_rep,
+ encoding,
+ errors,
+ block_columns: List[str],
):
if not block.is_object:
return block.values
@@ -4912,14 +4920,20 @@ def _maybe_convert_for_string_atom(
# we cannot serialize this data, so report an exception on a column
# by column basis
- for i in range(len(block.shape[0])):
+
+ # expected behaviour:
+ # search block for a non-string object column by column
+ for i in range(block.shape[0]):
col = block.iget(i)
inferred_type = lib.infer_dtype(col, skipna=False)
if inferred_type != "string":
- iloc = block.mgr_locs.indexer[i]
+ error_column_label = (
+ block_columns[i] if len(block_columns) > i else f"No.{i}"
+ )
raise TypeError(
- f"Cannot serialize the column [{iloc}] because\n"
- f"its data contents are [{inferred_type}] object dtype"
+ f"Cannot serialize the column [{error_column_label}]\n"
+ f"because its data contents are not [string] but "
+ f"[{inferred_type}] object dtype"
)
# itemsize is the maximum length of a string (along any dimension)
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 7e288ec6f5063..3f0fd6e7483f8 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -2055,7 +2055,10 @@ def test_append_raise(self, setup_path):
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
- msg = re.escape("object of type 'int' has no len()")
+ msg = re.escape(
+ """Cannot serialize the column [invalid]
+because its data contents are not [string] but [mixed] object dtype"""
+ )
with pytest.raises(TypeError, match=msg):
store.append("df", df)
@@ -2221,7 +2224,10 @@ def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block......
- msg = "object of type 'int' has no len()"
+ msg = re.escape(
+ """Cannot serialize the column [datetime1]
+because its data contents are not [string] but [date] object dtype"""
+ )
with pytest.raises(TypeError, match=msg):
store.append("df_unimplemented", df)
| - [x] closes #34274
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
*P.S. something was wrong with `git diff upstream/master` so I directly ran `flake8 ./pandas/io/pytables.py` as it's the only file changed*
------
I was running into the same problem as #34274 and found where the error is. But (as I've just started using pandas for several days:sweet_smile:) I have few knowledge about pandas, so maybe I didn't make the full use of pandas' components.
As seen in #34274, if a `DataFrame` contains non-string elements and is about to be written into an HDF5 file by `HDFStore.put()`, a `TypeError: object of type 'int' has no len()` error is raised.
But it's not the right "error" expected. `HDFStore.put()` can't serialize some types of element, `so report an exception on a column by column basis` is actually needed.
This commit fixes this, now it raises `TypeError: Cannot serialize the column [{column_No}] because its data contents are not string but [{non_string_type}] object dtype` as expected. | https://api.github.com/repos/pandas-dev/pandas/pulls/38919 | 2021-01-03T12:00:19Z | 2021-01-05T02:22:33Z | 2021-01-05T02:22:33Z | 2021-01-05T02:22:38Z |
Remove Python2 numeric relics | diff --git a/asv_bench/benchmarks/arithmetic.py b/asv_bench/benchmarks/arithmetic.py
index 5a3febdcf75e7..7478efbf22609 100644
--- a/asv_bench/benchmarks/arithmetic.py
+++ b/asv_bench/benchmarks/arithmetic.py
@@ -122,8 +122,8 @@ def setup(self, op):
n_rows = 500
# construct dataframe with 2 blocks
- arr1 = np.random.randn(n_rows, int(n_cols / 2)).astype("f8")
- arr2 = np.random.randn(n_rows, int(n_cols / 2)).astype("f4")
+ arr1 = np.random.randn(n_rows, n_cols // 2).astype("f8")
+ arr2 = np.random.randn(n_rows, n_cols // 2).astype("f4")
df = pd.concat(
[pd.DataFrame(arr1), pd.DataFrame(arr2)], axis=1, ignore_index=True
)
@@ -131,9 +131,9 @@ def setup(self, op):
df._consolidate_inplace()
# TODO: GH#33198 the setting here shoudlnt need two steps
- arr1 = np.random.randn(n_rows, int(n_cols / 4)).astype("f8")
- arr2 = np.random.randn(n_rows, int(n_cols / 2)).astype("i8")
- arr3 = np.random.randn(n_rows, int(n_cols / 4)).astype("f8")
+ arr1 = np.random.randn(n_rows, n_cols // 4).astype("f8")
+ arr2 = np.random.randn(n_rows, n_cols // 2).astype("i8")
+ arr3 = np.random.randn(n_rows, n_cols // 4).astype("f8")
df2 = pd.concat(
[pd.DataFrame(arr1), pd.DataFrame(arr2), pd.DataFrame(arr3)],
axis=1,
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index 7386b0b903afd..dc6fd2ff61423 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -263,7 +263,7 @@ class Repr:
def setup(self):
nrows = 10000
data = np.random.randn(nrows, 10)
- arrays = np.tile(np.random.randn(3, int(nrows / 100)), 100)
+ arrays = np.tile(np.random.randn(3, nrows // 100), 100)
idx = MultiIndex.from_arrays(arrays)
self.df3 = DataFrame(data, index=idx)
self.df4 = DataFrame(data, index=np.random.randn(nrows))
@@ -648,9 +648,9 @@ class Describe:
def setup(self):
self.df = DataFrame(
{
- "a": np.random.randint(0, 100, int(1e6)),
- "b": np.random.randint(0, 100, int(1e6)),
- "c": np.random.randint(0, 100, int(1e6)),
+ "a": np.random.randint(0, 100, 10 ** 6),
+ "b": np.random.randint(0, 100, 10 ** 6),
+ "c": np.random.randint(0, 100, 10 ** 6),
}
)
diff --git a/asv_bench/benchmarks/hash_functions.py b/asv_bench/benchmarks/hash_functions.py
index 17bf434acf38a..5227ad0f53a04 100644
--- a/asv_bench/benchmarks/hash_functions.py
+++ b/asv_bench/benchmarks/hash_functions.py
@@ -103,9 +103,9 @@ class Float64GroupIndex:
# GH28303
def setup(self):
self.df = pd.date_range(
- start="1/1/2018", end="1/2/2018", periods=1e6
+ start="1/1/2018", end="1/2/2018", periods=10 ** 6
).to_frame()
- self.group_index = np.round(self.df.index.astype(int) / 1e9)
+ self.group_index = np.round(self.df.index.astype(int) / 10 ** 9)
def time_groupby(self):
self.df.groupby(self.group_index).last()
diff --git a/asv_bench/benchmarks/inference.py b/asv_bench/benchmarks/inference.py
index 40b064229ae49..e17c985321c47 100644
--- a/asv_bench/benchmarks/inference.py
+++ b/asv_bench/benchmarks/inference.py
@@ -42,7 +42,7 @@ class ToNumericDowncast:
]
N = 500000
- N2 = int(N / 2)
+ N2 = N // 2
data_dict = {
"string-int": ["1"] * N2 + [2] * N2,
diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py
index a572b8a70a680..b0ad43ace88b5 100644
--- a/asv_bench/benchmarks/join_merge.py
+++ b/asv_bench/benchmarks/join_merge.py
@@ -158,7 +158,7 @@ def setup(self):
daily_dates = date_index.to_period("D").to_timestamp("S", "S")
self.fracofday = date_index.values - daily_dates.values
self.fracofday = self.fracofday.astype("timedelta64[ns]")
- self.fracofday = self.fracofday.astype(np.float64) / 86400000000000.0
+ self.fracofday = self.fracofday.astype(np.float64) / 86_400_000_000_000
self.fracofday = Series(self.fracofday, daily_dates)
index = date_range(date_index.min(), date_index.max(), freq="D")
self.temp = Series(1.0, index)[self.fracofday.index]
diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py
index ab9c46fd2bf0b..306083e9c22b2 100644
--- a/asv_bench/benchmarks/rolling.py
+++ b/asv_bench/benchmarks/rolling.py
@@ -171,7 +171,7 @@ class PeakMemFixedWindowMinMax:
params = ["min", "max"]
def setup(self, operation):
- N = int(1e6)
+ N = 10 ** 6
arr = np.random.random(N)
self.roll = pd.Series(arr).rolling(2)
@@ -233,7 +233,7 @@ class GroupbyLargeGroups:
def setup(self):
N = 100000
- self.df = pd.DataFrame({"A": [1, 2] * int(N / 2), "B": np.random.randn(N)})
+ self.df = pd.DataFrame({"A": [1, 2] * (N // 2), "B": np.random.randn(N)})
def time_rolling_multiindex_creation(self):
self.df.groupby("A").rolling(3).mean()
diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py
index b52c8142334be..3f4da8acf4db0 100644
--- a/asv_bench/benchmarks/series_methods.py
+++ b/asv_bench/benchmarks/series_methods.py
@@ -284,7 +284,7 @@ def time_dir_strings(self):
class SeriesGetattr:
# https://github.com/pandas-dev/pandas/issues/19764
def setup(self):
- self.s = Series(1, index=date_range("2012-01-01", freq="s", periods=int(1e6)))
+ self.s = Series(1, index=date_range("2012-01-01", freq="s", periods=10 ** 6))
def time_series_datetimeindex_repr(self):
getattr(self.s, "a", None)
diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py
index 4ed542b3a28e3..94498e54f0f06 100644
--- a/asv_bench/benchmarks/timeseries.py
+++ b/asv_bench/benchmarks/timeseries.py
@@ -346,7 +346,7 @@ def time_iso8601_tz_spaceformat(self):
class ToDatetimeNONISO8601:
def setup(self):
N = 10000
- half = int(N / 2)
+ half = N // 2
ts_string_1 = "March 1, 2018 12:00:00+0400"
ts_string_2 = "March 1, 2018 12:00:00+0500"
self.same_offset = [ts_string_1] * N
@@ -376,7 +376,7 @@ def setup(self):
self.same_offset = ["10/11/2018 00:00:00.045-07:00"] * N
self.diff_offset = [
f"10/11/2018 00:00:00.045-0{offset}:00" for offset in range(10)
- ] * int(N / 10)
+ ] * (N // 10)
def time_exact(self):
to_datetime(self.s2, format="%d%b%y")
diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index 897f4ab59c370..0591fc6afd633 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -901,7 +901,7 @@ def _create_missing_idx(nrows, ncols, density, random_state=None):
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
- size = int(np.round((1 - density) * nrows * ncols))
+ size = round((1 - density) * nrows * ncols)
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 2e43937ddd0c2..1291fc25fc21d 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -835,7 +835,7 @@ def value_counts(
result = result.sort_values(ascending=ascending)
if normalize:
- result = result / float(counts.sum())
+ result = result / counts.sum()
return result
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 8b350fef27fb1..fe5db3ec5fd8c 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2214,7 +2214,7 @@ def describe(self):
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
- freqs = counts / float(counts.sum())
+ freqs = counts / counts.sum()
from pandas.core.reshape.concat import concat
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 58345aa22eac1..86c8d15a21227 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -572,7 +572,7 @@ def __iter__(self):
data = self.asi8
length = len(self)
chunksize = 10000
- chunks = int(length / chunksize) + 1
+ chunks = (length // chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, length)
@@ -1847,12 +1847,12 @@ def to_julian_date(self):
+ 1_721_118.5
+ (
self.hour
- + self.minute / 60.0
- + self.second / 3600.0
- + self.microsecond / 3600.0 / 1e6
- + self.nanosecond / 3600.0 / 1e9
+ + self.minute / 60
+ + self.second / 3600
+ + self.microsecond / 3600 / 10 ** 6
+ + self.nanosecond / 3600 / 10 ** 9
)
- / 24.0
+ / 24
)
# -----------------------------------------------------------------
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index fa648157d7678..26dbe5e0dba44 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -578,7 +578,7 @@ def density(self):
>>> s.density
0.6
"""
- return float(self.sp_index.npoints) / float(self.sp_index.length)
+ return self.sp_index.npoints / self.sp_index.length
@property
def npoints(self) -> int:
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 8410f3d491891..55136e0dedcf5 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -338,7 +338,7 @@ def __iter__(self):
data = self.asi8
length = len(self)
chunksize = 10000
- chunks = int(length / chunksize) + 1
+ chunks = (length // chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, length)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index eee5f72a05738..fe86bf3f582ca 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5336,7 +5336,7 @@ def sample(
elif n is not None and frac is None and n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
- n = int(round(frac * axis_length))
+ n = round(frac * axis_length)
elif n is not None and frac is not None:
raise ValueError("Please enter a value for `frac` OR `n`, not both")
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 69f11484237a3..56e171e1a5db1 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -122,7 +122,7 @@ def should_cache(
return False
if len(arg) <= 5000:
- check_count = int(len(arg) * 0.1)
+ check_count = len(arg) // 10
else:
check_count = 500
else:
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index 85118549300ca..983f7220c2fb9 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -61,7 +61,7 @@ def get_center_of_mass(
elif span is not None:
if span < 1:
raise ValueError("span must satisfy: span >= 1")
- comass = (span - 1) / 2.0
+ comass = (span - 1) / 2
elif halflife is not None:
if halflife <= 0:
raise ValueError("halflife must satisfy: halflife > 0")
@@ -70,7 +70,7 @@ def get_center_of_mass(
elif alpha is not None:
if alpha <= 0 or alpha > 1:
raise ValueError("alpha must satisfy: 0 < alpha <= 1")
- comass = (1.0 - alpha) / alpha
+ comass = (1 - alpha) / alpha
else:
raise ValueError("Must pass one of comass, span, halflife, or alpha")
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index b23b5fe5b34a8..b43dde7d2a053 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -856,7 +856,7 @@ def _value_with_fmt(self, val):
elif isinstance(val, datetime.date):
fmt = self.date_format
elif isinstance(val, datetime.timedelta):
- val = val.total_seconds() / float(86400)
+ val = val.total_seconds() / 86400
fmt = "0"
else:
val = str(val)
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 5ad06bdcd8383..65c51c78383a9 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -282,7 +282,7 @@ def _generate_multiindex_header_rows(self) -> Iterator[List[Label]]:
def _save_body(self) -> None:
nrows = len(self.data_index)
- chunks = int(nrows / self.chunksize) + 1
+ chunks = (nrows // self.chunksize) + 1
for i in range(chunks):
start_i = i * self.chunksize
end_i = min(start_i + self.chunksize, nrows)
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index b3c2411304f6b..8265d5ef8f94b 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1637,7 +1637,7 @@ def is_dates_only(
values_int = values.asi8
consider_values = values_int != iNaT
- one_day_nanos = 86400 * 1e9
+ one_day_nanos = 86400 * 10 ** 9
even_days = (
np.logical_and(consider_values, values_int % int(one_day_nanos) != 0).sum() == 0
)
@@ -1741,7 +1741,7 @@ def get_format_timedelta64(
consider_values = values_int != iNaT
- one_day_nanos = 86400 * 1e9
+ one_day_nanos = 86400 * 10 ** 9
even_days = (
np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0
)
diff --git a/pandas/io/formats/string.py b/pandas/io/formats/string.py
index 4ebb78f29c739..1fe2ed9806535 100644
--- a/pandas/io/formats/string.py
+++ b/pandas/io/formats/string.py
@@ -160,7 +160,7 @@ def _fit_strcols_to_terminal_width(self, strcols: List[List[str]]) -> str:
counter = 0
while adj_dif > 0 and n_cols > 1:
counter += 1
- mid = int(round(n_cols / 2.0))
+ mid = round(n_cols / 2)
mid_ix = col_lens.index[mid]
col_len = col_lens[mid_ix]
# adjoin adds one
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index aa4bcd8b1565a..bbc5e6ad82493 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -905,7 +905,7 @@ def insert(self, chunksize: Optional[int] = None, method: Optional[str] = None):
elif chunksize == 0:
raise ValueError("chunksize argument should be non-zero")
- chunks = int(nrows / chunksize) + 1
+ chunks = (nrows // chunksize) + 1
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index 38789fffed8a0..978010efd7ee5 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -38,7 +38,7 @@
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
-MUSEC_PER_DAY = 1e6 * SEC_PER_DAY
+MUSEC_PER_DAY = 10 ** 6 * SEC_PER_DAY
_mpl_units = {} # Cache for units overwritten by us
@@ -116,7 +116,7 @@ def deregister():
def _to_ordinalf(tm: pydt.time) -> float:
- tot_sec = tm.hour * 3600 + tm.minute * 60 + tm.second + float(tm.microsecond / 1e6)
+ tot_sec = tm.hour * 3600 + tm.minute * 60 + tm.second + tm.microsecond / 10 ** 6
return tot_sec
@@ -182,7 +182,7 @@ def __call__(self, x, pos=0) -> str:
"""
fmt = "%H:%M:%S.%f"
s = int(x)
- msus = int(round((x - s) * 1e6))
+ msus = round((x - s) * 10 ** 6)
ms = msus // 1000
us = msus % 1000
m, s = divmod(s, 60)
@@ -429,7 +429,7 @@ def _from_ordinal(x, tz: Optional[tzinfo] = None) -> datetime:
hour, remainder = divmod(24 * remainder, 1)
minute, remainder = divmod(60 * remainder, 1)
second, remainder = divmod(60 * remainder, 1)
- microsecond = int(1e6 * remainder)
+ microsecond = int(1_000_000 * remainder)
if microsecond < 10:
microsecond = 0 # compensate for rounding errors
dt = datetime(
@@ -439,7 +439,7 @@ def _from_ordinal(x, tz: Optional[tzinfo] = None) -> datetime:
dt = dt.astimezone(tz)
if microsecond > 999990: # compensate for rounding errors
- dt += timedelta(microseconds=1e6 - microsecond)
+ dt += timedelta(microseconds=1_000_000 - microsecond)
return dt
@@ -611,27 +611,27 @@ def _second_finder(label_interval):
info_fmt[day_start] = "%H:%M:%S\n%d-%b"
info_fmt[year_start] = "%H:%M:%S\n%d-%b\n%Y"
- if span < periodsperday / 12000.0:
+ if span < periodsperday / 12000:
_second_finder(1)
- elif span < periodsperday / 6000.0:
+ elif span < periodsperday / 6000:
_second_finder(2)
- elif span < periodsperday / 2400.0:
+ elif span < periodsperday / 2400:
_second_finder(5)
- elif span < periodsperday / 1200.0:
+ elif span < periodsperday / 1200:
_second_finder(10)
- elif span < periodsperday / 800.0:
+ elif span < periodsperday / 800:
_second_finder(15)
- elif span < periodsperday / 400.0:
+ elif span < periodsperday / 400:
_second_finder(30)
- elif span < periodsperday / 150.0:
+ elif span < periodsperday / 150:
_minute_finder(1)
- elif span < periodsperday / 70.0:
+ elif span < periodsperday / 70:
_minute_finder(2)
- elif span < periodsperday / 24.0:
+ elif span < periodsperday / 24:
_minute_finder(5)
- elif span < periodsperday / 12.0:
+ elif span < periodsperday / 12:
_minute_finder(15)
- elif span < periodsperday / 6.0:
+ elif span < periodsperday / 6:
_minute_finder(30)
elif span < periodsperday / 2.5:
_hour_finder(1, False)
@@ -1058,7 +1058,7 @@ def format_timedelta_ticks(x, pos, n_decimals: int) -> str:
"""
Convert seconds to 'D days HH:MM:SS.F'
"""
- s, ns = divmod(x, 1e9)
+ s, ns = divmod(x, 10 ** 9)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
@@ -1072,7 +1072,7 @@ def format_timedelta_ticks(x, pos, n_decimals: int) -> str:
def __call__(self, x, pos=0) -> str:
(vmin, vmax) = tuple(self.axis.get_view_interval())
- n_decimals = int(np.ceil(np.log10(100 * 1e9 / abs(vmax - vmin))))
+ n_decimals = int(np.ceil(np.log10(100 * 10 ** 9 / abs(vmax - vmin))))
if n_decimals > 9:
n_decimals = 9
return self.format_timedelta_ticks(x, pos, n_decimals)
diff --git a/pandas/plotting/_matplotlib/misc.py b/pandas/plotting/_matplotlib/misc.py
index f519d1e96f5b0..c564e6ed39f7d 100644
--- a/pandas/plotting/_matplotlib/misc.py
+++ b/pandas/plotting/_matplotlib/misc.py
@@ -55,7 +55,7 @@ def scatter_matrix(
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
- rdelta_ext = (rmax_ - rmin_) * range_padding / 2.0
+ rdelta_ext = (rmax_ - rmin_) * range_padding / 2
boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
for i, a in enumerate(df.columns):
@@ -158,10 +158,7 @@ def normalize(series):
m = len(frame.columns) - 1
s = np.array(
- [
- (np.cos(t), np.sin(t))
- for t in [2.0 * np.pi * (i / float(m)) for i in range(m)]
- ]
+ [(np.cos(t), np.sin(t)) for t in [2 * np.pi * (i / m) for i in range(m)]]
)
for i in range(n):
@@ -447,10 +444,10 @@ def autocorrelation_plot(
ax.set_xlim(1, n)
ax.set_ylim(-1.0, 1.0)
mean = np.mean(data)
- c0 = np.sum((data - mean) ** 2) / float(n)
+ c0 = np.sum((data - mean) ** 2) / n
def r(h):
- return ((data[: n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
+ return ((data[: n - h] - mean) * (data[h:] - mean)).sum() / n / c0
x = np.arange(n) + 1
y = [r(loc) for loc in x]
diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index 0af2f70f896be..f288e6ebb783c 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -61,12 +61,10 @@ def _get_layout(nplots: int, layout=None, layout_type: str = "box") -> Tuple[int
nrows, ncols = layout
- # Python 2 compat
- ceil_ = lambda x: int(ceil(x))
if nrows == -1 and ncols > 0:
- layout = nrows, ncols = (ceil_(float(nplots) / ncols), ncols)
+ layout = nrows, ncols = (ceil(nplots / ncols), ncols)
elif ncols == -1 and nrows > 0:
- layout = nrows, ncols = (nrows, ceil_(float(nplots) / nrows))
+ layout = nrows, ncols = (nrows, ceil(nplots / nrows))
elif ncols <= 0 and nrows <= 0:
msg = "At least one dimension of layout must be positive"
raise ValueError(msg)
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index 6a18810700205..199c521cfc81b 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -305,14 +305,14 @@ def test_truncate_out_of_bounds(self):
# GH11382
# small
- shape = [int(2e3)] + ([1] * (self._ndim - 1))
+ shape = [2000] + ([1] * (self._ndim - 1))
small = self._construct(shape, dtype="int8", value=1)
self._compare(small.truncate(), small)
self._compare(small.truncate(before=0, after=3e3), small)
self._compare(small.truncate(before=-1, after=2e3), small)
# big
- shape = [int(2e6)] + ([1] * (self._ndim - 1))
+ shape = [2_000_000] + ([1] * (self._ndim - 1))
big = self._construct(shape, dtype="int8", value=1)
self._compare(big.truncate(), big)
self._compare(big.truncate(before=0, after=3e6), big)
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index 197738330efe1..c930acd179330 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -727,7 +727,7 @@ def test_to_excel_timedelta(self, path):
df["new"] = df["A"].apply(lambda x: timedelta(seconds=x))
expected["new"] = expected["A"].apply(
- lambda x: timedelta(seconds=x).total_seconds() / float(86400)
+ lambda x: timedelta(seconds=x).total_seconds() / 86400
)
df.to_excel(path, "test1")
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 3f4c21389daed..7e288ec6f5063 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -3164,7 +3164,7 @@ def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
- chunksize = int(1e4)
+ chunksize = 10_000
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 66a4f9598c49b..e3fd404ec1906 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -1075,7 +1075,7 @@ def test_time_musec(self):
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
- us = int(round((t - int(t)) * 1e6))
+ us = round((t - int(t)) * 1e6)
h, m = divmod(m, 60)
rs = l.get_text()
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index b707757574ecd..9cd13b2312ea7 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -341,7 +341,7 @@ def test_pie_series(self):
ax = _check_plot_works(
series.plot.pie, colors=color_args, autopct="%.2f", fontsize=7
)
- pcts = [f"{s*100:.2f}" for s in series.values / float(series.sum())]
+ pcts = [f"{s*100:.2f}" for s in series.values / series.sum()]
expected_texts = list(chain.from_iterable(zip(series.index, pcts)))
self._check_text_labels(ax.texts, expected_texts)
for t in ax.texts:
diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py
index 8ec8f1e0457fb..7aefd42ada322 100644
--- a/pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -393,9 +393,9 @@ def test_td_div_td64_non_nano(self):
# truediv
td = Timedelta("1 days 2 hours 3 ns")
result = td / np.timedelta64(1, "D")
- assert result == td.value / float(86400 * 1e9)
+ assert result == td.value / (86400 * 10 ** 9)
result = td / np.timedelta64(1, "s")
- assert result == td.value / float(1e9)
+ assert result == td.value / 10 ** 9
result = td / np.timedelta64(1, "ns")
assert result == td.value
@@ -416,7 +416,7 @@ def test_td_div_numeric_scalar(self):
assert isinstance(result, Timedelta)
assert result == Timedelta(days=5)
- result = td / 5.0
+ result = td / 5
assert isinstance(result, Timedelta)
assert result == Timedelta(days=2)
diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py
index f94e174a26824..3fa411b421015 100644
--- a/pandas/tests/series/methods/test_fillna.py
+++ b/pandas/tests/series/methods/test_fillna.py
@@ -231,7 +231,7 @@ def test_timedelta_fillna(self, frame_or_series):
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
- result = obj.fillna(np.timedelta64(int(1e9)))
+ result = obj.fillna(np.timedelta64(10 ** 9))
expected = Series(
[
timedelta(seconds=1),
diff --git a/pandas/tests/tools/test_to_timedelta.py b/pandas/tests/tools/test_to_timedelta.py
index c0196549cee33..de3ff6e80ad66 100644
--- a/pandas/tests/tools/test_to_timedelta.py
+++ b/pandas/tests/tools/test_to_timedelta.py
@@ -184,7 +184,7 @@ def test_to_timedelta_float(self):
# https://github.com/pandas-dev/pandas/issues/25077
arr = np.arange(0, 1, 1e-6)[-10:]
result = pd.to_timedelta(arr, unit="s")
- expected_asi8 = np.arange(999990000, int(1e9), 1000, dtype="int64")
+ expected_asi8 = np.arange(999990000, 10 ** 9, 1000, dtype="int64")
tm.assert_numpy_array_equal(result.asi8, expected_asi8)
def test_to_timedelta_coerce_strings_unit(self):
diff --git a/pandas/tests/window/moments/test_moments_rolling_quantile.py b/pandas/tests/window/moments/test_moments_rolling_quantile.py
index 1b6d4a5c82164..e06a5faabe310 100644
--- a/pandas/tests/window/moments/test_moments_rolling_quantile.py
+++ b/pandas/tests/window/moments/test_moments_rolling_quantile.py
@@ -18,8 +18,8 @@ def scoreatpercentile(a, per):
retval = values[-1]
else:
- qlow = float(idx) / float(values.shape[0] - 1)
- qhig = float(idx + 1) / float(values.shape[0] - 1)
+ qlow = idx / (values.shape[0] - 1)
+ qhig = (idx + 1) / (values.shape[0] - 1)
vlow = values[idx]
vhig = values[idx + 1]
retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Modernize the code to Python 3 by removing Python 2 numeric hacks, such as:
- `a / b` of two integers now always returns a float, so no need to convert `a` and/or `b` to float (or to use float literals)
- `a // b` of two integers returns an integer, use it instead of `int(a / b)`
- `math.ceil`, `math.floor`, and `round` return integer, so no need to convert the result to integer
- `1e6` is a float, converting it to `int(1e6)` is slower than `10 ** 6` (or `1_000_000`)
| https://api.github.com/repos/pandas-dev/pandas/pulls/38916 | 2021-01-03T05:52:11Z | 2021-01-03T17:09:49Z | 2021-01-03T17:09:49Z | 2021-01-05T07:10:41Z |
REF: de-duplicate code in libparsing/libperiod | diff --git a/pandas/_libs/tslibs/parsing.pxd b/pandas/_libs/tslibs/parsing.pxd
index 9c9262beaafad..25667f00e42b5 100644
--- a/pandas/_libs/tslibs/parsing.pxd
+++ b/pandas/_libs/tslibs/parsing.pxd
@@ -1,2 +1,3 @@
cpdef str get_rule_month(str source)
+cpdef quarter_to_myear(int year, int quarter, str freq)
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index aeb1be121bc9e..5c3417ee2d93c 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -378,7 +378,7 @@ cpdef bint _does_string_look_like_datetime(str py_string):
cdef inline object _parse_dateabbr_string(object date_string, datetime default,
- object freq):
+ str freq=None):
cdef:
object ret
# year initialized to prevent compiler warnings
@@ -438,21 +438,13 @@ cdef inline object _parse_dateabbr_string(object date_string, datetime default,
f'quarter must be '
f'between 1 and 4: {date_string}')
- if freq is not None:
- # TODO: hack attack, #1228
- freq = getattr(freq, "freqstr", freq)
- try:
- mnum = c_MONTH_NUMBERS[get_rule_month(freq)] + 1
- except (KeyError, ValueError):
- raise DateParseError(f'Unable to retrieve month '
- f'information from given '
- f'freq: {freq}')
-
- month = (mnum + (quarter - 1) * 3) % 12 + 1
- if month > mnum:
- year -= 1
- else:
- month = (quarter - 1) * 3 + 1
+ try:
+ # GH#1228
+ year, month = quarter_to_myear(year, quarter, freq)
+ except KeyError:
+ raise DateParseError("Unable to retrieve month "
+ "information from given "
+ f"freq: {freq}")
ret = default.replace(year=year, month=month)
return ret, 'quarter'
@@ -482,6 +474,41 @@ cdef inline object _parse_dateabbr_string(object date_string, datetime default,
raise ValueError(f'Unable to parse {date_string}')
+cpdef quarter_to_myear(int year, int quarter, str freq):
+ """
+ A quarterly frequency defines a "year" which may not coincide with
+ the calendar-year. Find the calendar-year and calendar-month associated
+ with the given year and quarter under the `freq`-derived calendar.
+
+ Parameters
+ ----------
+ year : int
+ quarter : int
+ freq : str or None
+
+ Returns
+ -------
+ year : int
+ month : int
+
+ See Also
+ --------
+ Period.qyear
+ """
+ if quarter <= 0 or quarter > 4:
+ raise ValueError("Quarter must be 1 <= q <= 4")
+
+ if freq is not None:
+ mnum = c_MONTH_NUMBERS[get_rule_month(freq)] + 1
+ month = (mnum + (quarter - 1) * 3) % 12 + 1
+ if month > mnum:
+ year -= 1
+ else:
+ month = (quarter - 1) * 3 + 1
+
+ return year, month
+
+
cdef dateutil_parse(
str timestr,
object default,
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index cbd4e2e6704a9..f0d21a3a7a957 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -74,8 +74,7 @@ from pandas._libs.tslibs.dtypes cimport (
PeriodDtypeBase,
attrname_to_abbrevs,
)
-from pandas._libs.tslibs.parsing cimport get_rule_month
-
+from pandas._libs.tslibs.parsing cimport quarter_to_myear
from pandas._libs.tslibs.parsing import parse_time_string
from pandas._libs.tslibs.nattype cimport (
@@ -2461,40 +2460,6 @@ cdef int64_t _ordinal_from_fields(int year, int month, quarter, int day,
minute, second, 0, 0, base)
-def quarter_to_myear(year: int, quarter: int, freqstr: str):
- """
- A quarterly frequency defines a "year" which may not coincide with
- the calendar-year. Find the calendar-year and calendar-month associated
- with the given year and quarter under the `freq`-derived calendar.
-
- Parameters
- ----------
- year : int
- quarter : int
- freqstr : str
- Equivalent to freq.freqstr
-
- Returns
- -------
- year : int
- month : int
-
- See Also
- --------
- Period.qyear
- """
- if quarter <= 0 or quarter > 4:
- raise ValueError('Quarter must be 1 <= q <= 4')
-
- mnum = c_MONTH_NUMBERS[get_rule_month(freqstr)] + 1
- month = (mnum + (quarter - 1) * 3) % 12 + 1
- if month > mnum:
- year -= 1
-
- return year, month
- # TODO: This whole func is really similar to parsing.pyx L434-L450
-
-
def validate_end_alias(how):
how_dict = {'S': 'S', 'E': 'E',
'START': 'S', 'FINISH': 'E',
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index e0e40a666896d..94d36aef8da52 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -12,6 +12,7 @@
delta_to_nanoseconds,
dt64arr_to_periodarr as c_dt64arr_to_periodarr,
iNaT,
+ parsing,
period as libperiod,
to_offset,
)
@@ -1074,7 +1075,7 @@ def _range_from_fields(
freqstr = freq.freqstr
year, quarter = _make_field_arrays(year, quarter)
for y, q in zip(year, quarter):
- y, m = libperiod.quarter_to_myear(y, q, freqstr)
+ y, m = parsing.quarter_to_myear(y, q, freqstr)
val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)
ordinals.append(val)
else:
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38915 | 2021-01-03T05:19:22Z | 2021-01-04T00:10:20Z | 2021-01-04T00:10:20Z | 2021-01-04T01:18:49Z |
CLN: re-use sanitize_index | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 7f2039c998f53..f1217e97aef5d 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -97,6 +97,7 @@
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexing import check_bool_indexer
from pandas.core.internals import SingleBlockManager
+from pandas.core.internals.construction import sanitize_index
from pandas.core.shared_docs import _shared_docs
from pandas.core.sorting import ensure_key_mapped, nargsort
from pandas.core.strings import StringMethods
@@ -319,17 +320,7 @@ def __init__(
data = [data]
index = ibase.default_index(len(data))
elif is_list_like(data):
-
- # a scalar numpy array is list-like but doesn't
- # have a proper length
- try:
- if len(index) != len(data):
- raise ValueError(
- f"Length of passed values is {len(data)}, "
- f"index implies {len(index)}."
- )
- except TypeError:
- pass
+ sanitize_index(data, index)
# create/copy the manager
if isinstance(data, SingleBlockManager):
diff --git a/pandas/tests/extension/base/constructors.py b/pandas/tests/extension/base/constructors.py
index 5c9e5dcf3ae24..9dbfd2a5589c0 100644
--- a/pandas/tests/extension/base/constructors.py
+++ b/pandas/tests/extension/base/constructors.py
@@ -74,7 +74,7 @@ def test_dataframe_from_series(self, data):
assert isinstance(result._mgr.blocks[0], ExtensionBlock)
def test_series_given_mismatched_index_raises(self, data):
- msg = "Length of passed values is 3, index implies 5"
+ msg = r"Length of values \(3\) does not match length of index \(5\)"
with pytest.raises(ValueError, match=msg):
pd.Series(data[:3], index=[0, 1, 2, 3, 4])
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index c7bd38bbd00b9..e35f37944e7da 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -576,7 +576,7 @@ def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
- msg = "Length of passed values is 3, index implies 4"
+ msg = r"Length of values \(3\) does not match length of index \(4\)"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
@@ -592,7 +592,7 @@ def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
- msg = "Length of passed values is 1, index implies 3"
+ msg = r"Length of values \(1\) does not match length of index \(3\)"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38912 | 2021-01-03T01:02:09Z | 2021-01-03T17:11:39Z | 2021-01-03T17:11:39Z | 2021-01-03T18:05:17Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.