title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
BUG: Panel.to_frame() with MultiIndex major axis | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 9a0854494a897..8179c710b7a8a 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -104,6 +104,8 @@ Bug Fixes
- Fixed string-representation of ``NaT`` to be "NaT" (:issue:`5708`)
- Fixed string-representation for Timestamp to show nanoseconds if present (:issue:`5912`)
- ``pd.match`` not returning passed sentinel
+ - ``Panel.to_frame()`` no longer fails when ``major_axis`` is a
+ ``MultiIndex`` (:issue:`5402`).
pandas 0.13.0
-------------
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 5c77c1e5e9516..ed964e76dd470 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -2396,6 +2396,44 @@ def format(self, space=2, sparsify=None, adjoin=True, names=False,
else:
return result_levels
+ def to_hierarchical(self, n_repeat, n_shuffle=1):
+ """
+ Return a MultiIndex reshaped to conform to the
+ shapes given by n_repeat and n_shuffle.
+
+ Useful to replicate and rearrange a MultiIndex for combination
+ with another Index with n_repeat items.
+
+ Parameters
+ ----------
+ n_repeat : int
+ Number of times to repeat the labels on self
+ n_shuffle : int
+ Controls the reordering of the labels. If the result is going
+ to be an inner level in a MultiIndex, n_shuffle will need to be
+ greater than one. The size of each label must divisible by
+ n_shuffle.
+
+ Returns
+ -------
+ MultiIndex
+
+ Examples
+ --------
+ >>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
+ (2, u'one'), (2, u'two')])
+ >>> idx.to_hierarchical(3)
+ MultiIndex(levels=[[1, 2], [u'one', u'two']],
+ labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
+ """
+ levels = self.levels
+ labels = [np.repeat(x, n_repeat) for x in self.labels]
+ # Assumes that each label is divisible by n_shuffle
+ labels = [x.reshape(n_shuffle, -1).ravel(1) for x in labels]
+ names = self.names
+ return MultiIndex(levels=levels, labels=labels, names=names)
+
@property
def is_all_dates(self):
return False
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 8c50396c503a0..832874f08561b 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -796,7 +796,9 @@ def groupby(self, function, axis='major'):
def to_frame(self, filter_observations=True):
"""
- Transform wide format into long (stacked) format as DataFrame
+ Transform wide format into long (stacked) format as DataFrame whose
+ columns are the Panel's items and whose index is a MultiIndex formed
+ of the Panel's major and minor axes.
Parameters
----------
@@ -811,6 +813,7 @@ def to_frame(self, filter_observations=True):
_, N, K = self.shape
if filter_observations:
+ # shaped like the return DataFrame
mask = com.notnull(self.values).all(axis=0)
# size = mask.sum()
selector = mask.ravel()
@@ -822,19 +825,45 @@ def to_frame(self, filter_observations=True):
for item in self.items:
data[item] = self[item].values.ravel()[selector]
- major_labels = np.arange(N).repeat(K)[selector]
+ def construct_multi_parts(idx, n_repeat, n_shuffle=1):
+ axis_idx = idx.to_hierarchical(n_repeat, n_shuffle)
+ labels = [x[selector] for x in axis_idx.labels]
+ levels = axis_idx.levels
+ names = axis_idx.names
+ return labels, levels, names
+
+ def construct_index_parts(idx, major=True):
+ levels = [idx]
+ if major:
+ labels = [np.arange(N).repeat(K)[selector]]
+ names = idx.name or 'major'
+ else:
+ labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]
+ labels = [labels.ravel()[selector]]
+ names = idx.name or 'minor'
+ names = [names]
+ return labels, levels, names
+
+ if isinstance(self.major_axis, MultiIndex):
+ major_labels, major_levels, major_names = construct_multi_parts(
+ self.major_axis, n_repeat=K)
+ else:
+ major_labels, major_levels, major_names = construct_index_parts(
+ self.major_axis)
- # Anyone think of a better way to do this? np.repeat does not
- # do what I want
- minor_labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]
- minor_labels = minor_labels.ravel()[selector]
+ if isinstance(self.minor_axis, MultiIndex):
+ minor_labels, minor_levels, minor_names = construct_multi_parts(
+ self.minor_axis, n_repeat=N, n_shuffle=K)
+ else:
+ minor_labels, minor_levels, minor_names = construct_index_parts(
+ self.minor_axis, major=False)
- maj_name = self.major_axis.name or 'major'
- min_name = self.minor_axis.name or 'minor'
+ levels = major_levels + minor_levels
+ labels = major_labels + minor_labels
+ names = major_names + minor_names
- index = MultiIndex(levels=[self.major_axis, self.minor_axis],
- labels=[major_labels, minor_labels],
- names=[maj_name, min_name], verify_integrity=False)
+ index = MultiIndex(levels=levels, labels=labels,
+ names=names, verify_integrity=False)
return DataFrame(data, index=index, columns=self.items)
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 1afabc8d4c882..7daf95ac15a95 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -1990,6 +1990,36 @@ def test_format_sparse_config(self):
warnings.filters = warn_filters
+ def test_to_hierarchical(self):
+ index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'),
+ (2, 'one'), (2, 'two')])
+ result = index.to_hierarchical(3)
+ expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
+ labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
+ tm.assert_index_equal(result, expected)
+ self.assertEqual(result.names, index.names)
+
+ # K > 1
+ result = index.to_hierarchical(3, 2)
+ expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
+ labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
+ [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
+ tm.assert_index_equal(result, expected)
+ self.assertEqual(result.names, index.names)
+
+ # non-sorted
+ index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
+ (2, 'a'), (2, 'b')],
+ names=['N1', 'N2'])
+
+ result = index.to_hierarchical(2)
+ expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'), (1, 'b'),
+ (2, 'a'), (2, 'a'), (2, 'b'), (2, 'b')],
+ names=['N1', 'N2'])
+ tm.assert_index_equal(result, expected)
+ self.assertEqual(result.names, index.names)
+
def test_bounds(self):
self.index._bounds
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 08d3afe63ec86..2589f7b82aedb 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -1450,6 +1450,86 @@ def test_to_frame_mixed(self):
# Previously, this was mutating the underlying index and changing its name
assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
+ def test_to_frame_multi_major(self):
+ idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'),
+ (2, 'two')])
+ df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
+ columns=['A', 'B', 'C'], index=idx)
+ wp = Panel({'i1': df, 'i2': df})
+ expected_idx = MultiIndex.from_tuples([(1, 'one', 'A'), (1, 'one', 'B'),
+ (1, 'one', 'C'), (1, 'two', 'A'),
+ (1, 'two', 'B'), (1, 'two', 'C'),
+ (2, 'one', 'A'), (2, 'one', 'B'),
+ (2, 'one', 'C'), (2, 'two', 'A'),
+ (2, 'two', 'B'), (2, 'two', 'C')],
+ names=[None, None, 'minor'])
+ expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3, 'c', 1, 4, 'd', 1],
+ 'i2': [1, 'a', 1, 2, 'b', 1, 3, 'c', 1, 4, 'd', 1]},
+ index=expected_idx)
+ result = wp.to_frame()
+ assert_frame_equal(result, expected)
+
+ wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
+ result = wp.to_frame()
+ assert_frame_equal(result, expected[1:])
+
+ idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'),
+ (np.nan, 'two')])
+ df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
+ columns=['A', 'B', 'C'], index=idx)
+ wp = Panel({'i1': df, 'i2': df})
+ ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'), (1, 'two', 'C'),
+ (1, 'one', 'A'), (1, 'one', 'B'), (1, 'one', 'C'),
+ (2, 'one', 'A'), (2, 'one', 'B'), (2, 'one', 'C'),
+ (np.nan, 'two', 'A'), (np.nan, 'two', 'B'),
+ (np.nan, 'two', 'C')],
+ names=[None, None, 'minor'])
+ expected.index = ex_idx
+ result = wp.to_frame()
+ assert_frame_equal(result, expected)
+
+ def test_to_frame_multi_major_minor(self):
+ cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
+ labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
+ idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'),
+ (2, 'two'), (3, 'three'), (4, 'four')])
+ df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14], ['a', 'b', 'w', 'x'],
+ ['c', 'd', 'y', 'z'], [-1, -2, -3, -4], [-5, -6, -7, -8]
+ ], columns=cols, index=idx)
+ wp = Panel({'i1': df, 'i2': df})
+
+ exp_idx = MultiIndex.from_tuples([(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
+ (1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
+ (1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
+ (1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
+ (2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
+ (2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
+ (2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
+ (2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
+ (3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
+ (3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
+ (4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
+ (4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
+ names=[None, None, None, None])
+ exp_val = [[1, 1], [2, 2], [11, 11], [12, 12], [3, 3], [4, 4], [13, 13],
+ [14, 14], ['a', 'a'], ['b', 'b'], ['w', 'w'], ['x', 'x'],
+ ['c', 'c'], ['d', 'd'], ['y', 'y'], ['z', 'z'], [-1, -1],
+ [-2, -2], [-3, -3], [-4, -4], [-5, -5], [-6, -6], [-7, -7],
+ [-8, -8]]
+ result = wp.to_frame()
+ expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
+ assert_frame_equal(result, expected)
+
+ def test_to_frame_multi_drop_level(self):
+ idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
+ df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
+ wp = Panel({'i1': df, 'i2': df})
+ result = wp.to_frame()
+ exp_idx = MultiIndex.from_tuples([(2, 'one', 'A'), (2, 'two', 'A')],
+ names=[None, None, 'minor'])
+ expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
+ assert_frame_equal(result, expected)
+
def test_to_panel_na_handling(self):
df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
| Closes #5402 WIP for now.
So this is a hack to get things "working".
I mainly just wanted to ask if there was a better way to get the levels and labels to the MultiIndex constructor.
Don't spend long thinking about it (this is my PR after all), but if you know of a quick way off the top of your head, I'd be interested.
I essentially need to
1. flatten the MultiIndex from major axis (levels and labels)
2. combine that flattened index with the minor axis (levels and labels)
Step 2 could be complicated by `minor_axis` being a MultiIndex also, I should be able to refactor this pretty easily to handle that. I think my was is making several copies of each index, and I'm not sure that they're all necessary.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5417 | 2013-11-02T19:37:15Z | 2014-01-15T16:12:41Z | 2014-01-15T16:12:41Z | 2016-11-03T12:37:41Z |
BUG: to_timedelta of a scalar returns a scalar, closes #5410. | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 79fbc1a6cbb54..4b33c20424b33 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -770,6 +770,7 @@ Bug Fixes
and DataFrames that have repeated (non-unique) indices. (:issue:`4620`)
- Fix empty series not printing name in repr (:issue:`4651`)
- Make tests create temp files in temp directory by default. (:issue:`5419`)
+ - ``pd.to_timedelta`` of a scalar returns a scalar (:issue:`5410`)
pandas 0.12.0
-------------
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index fd9fb0ef0d79a..e5186cb3030ff 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -2314,6 +2314,25 @@ def test_timedelta64_operations_with_timedeltas(self):
# roundtrip
assert_series_equal(result + td2,td1)
+ # Now again, using pd.to_timedelta, which should build
+ # a Series or a scalar, depending on input.
+ if not _np_version_under1p7:
+ td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
+ td2 = pd.to_timedelta('00:05:04')
+ result = td1 - td2
+ expected = Series([timedelta(seconds=0)] * 3) -Series(
+ [timedelta(seconds=1)] * 3)
+ self.assert_(result.dtype == 'm8[ns]')
+ assert_series_equal(result, expected)
+
+ result2 = td2 - td1
+ expected = (Series([timedelta(seconds=1)] * 3) -
+ Series([timedelta(seconds=0)] * 3))
+ assert_series_equal(result2, expected)
+
+ # roundtrip
+ assert_series_equal(result + td2,td1)
+
def test_timedelta64_operations_with_integers(self):
# GH 4521
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index 64e5728f0f549..199ad19986b39 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -180,17 +180,20 @@ def test_timedelta_ops(self):
s = Series([Timestamp('20130101') + timedelta(seconds=i*i) for i in range(10) ])
td = s.diff()
- result = td.mean()
+ result = td.mean()[0]
+ # TODO This should have returned a scalar to begin with. Hack for now.
expected = to_timedelta(timedelta(seconds=9))
- tm.assert_series_equal(result, expected)
+ tm.assert_almost_equal(result, expected)
result = td.quantile(.1)
+ # This properly returned a scalar.
expected = to_timedelta('00:00:02.6')
- tm.assert_series_equal(result, expected)
+ tm.assert_almost_equal(result, expected)
- result = td.median()
+ result = td.median()[0]
+ # TODO This should have returned a scalar to begin with. Hack for now.
expected = to_timedelta('00:00:08')
- tm.assert_series_equal(result, expected)
+ tm.assert_almost_equal(result, expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/tseries/timedeltas.py b/pandas/tseries/timedeltas.py
index 24e4b1377cc45..862dc8d410996 100644
--- a/pandas/tseries/timedeltas.py
+++ b/pandas/tseries/timedeltas.py
@@ -58,7 +58,8 @@ def _convert_listlike(arg, box):
elif is_list_like(arg):
return _convert_listlike(arg, box=box)
- return _convert_listlike([ arg ], box=box)
+ # ...so it must be a scalar value. Return scalar.
+ return _coerce_scalar_to_timedelta_type(arg, unit=unit)
_short_search = re.compile(
"^\s*(?P<neg>-?)\s*(?P<value>\d*\.?\d*)\s*(?P<unit>d|s|ms|us|ns)?\s*$",re.IGNORECASE)
| closes #5410
| https://api.github.com/repos/pandas-dev/pandas/pulls/5415 | 2013-11-02T12:55:39Z | 2013-11-04T21:19:02Z | 2013-11-04T21:19:02Z | 2014-06-20T16:34:01Z |
ENH/BUG: pass formatting params thru to `to_csv` | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 7b7b0e745872a..ac2cabe009694 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1036,8 +1036,10 @@ The Series and DataFrame objects have an instance method ``to_csv`` which
allows storing the contents of the object as a comma-separated-values file. The
function takes a number of arguments. Only the first is required.
- - ``path``: A string path to the file to write
+ - ``path_or_buf``: A string path to the file to write or a StringIO
+ - ``sep`` : Field delimiter for the output file (default ",")
- ``na_rep``: A string representation of a missing value (default '')
+ - ``float_format``: Format string for floating point numbers
- ``cols``: Columns to write (default None)
- ``header``: Whether to write out the column names (default True)
- ``index``: whether to write row (index) names (default True)
@@ -1045,11 +1047,18 @@ function takes a number of arguments. Only the first is required.
(default), and `header` and `index` are True, then the index names are
used. (A sequence should be given if the DataFrame uses MultiIndex).
- ``mode`` : Python write mode, default 'w'
- - ``sep`` : Field delimiter for the output file (default ",")
- ``encoding``: a string representing the encoding to use if the contents are
non-ascii, for python versions prior to 3
- - ``tupleize_cols``: boolean, default False, if False, write as a list of tuples,
- otherwise write in an expanded line format suitable for ``read_csv``
+ - ``line_terminator``: Character sequence denoting line end (default '\\n')
+ - ``quoting``: Set quoting rules as in csv module (default csv.QUOTE_MINIMAL)
+ - ``quotechar``: Character used to quote fields (default '"')
+ - ``doublequote``: Control quoting of ``quotechar`` in fields (default True)
+ - ``escapechar``: Character used to escape ``sep`` and ``quotechar`` when
+ appropriate (default None)
+ - ``chunksize``: Number of rows to write at a time
+ - ``tupleize_cols``: If False (default), write as a list of tuples, otherwise
+ write in an expanded line format suitable for ``read_csv``
+ - ``date_format``: Format string for datetime objects
Writing a formatted string
~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 8b753abc83ca7..2440c8651006e 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -85,7 +85,8 @@ Improvements to existing features
- Performance improvement in indexing into a multi-indexed Series (:issue:`5567`)
- Testing statements updated to use specialized asserts (:issue: `6175`)
- ``Series.rank()`` now has a percentage rank option (:issue: `5971`)
-
+- ``quotechar``, ``doublequote``, and ``escapechar`` can now be specified when
+ using ``DataFrame.to_csv`` (:issue:`5414`, :issue:`4528`)
.. _release.bug_fixes-0.14.0:
diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt
index 597067609bf7f..58ae5084c4827 100644
--- a/doc/source/v0.14.0.txt
+++ b/doc/source/v0.14.0.txt
@@ -179,6 +179,9 @@ Enhancements
household.join(portfolio, how='inner')
+- ``quotechar``, ``doublequote``, and ``escapechar`` can now be specified when
+ using ``DataFrame.to_csv`` (:issue:`5414`, :issue:`4528`)
+
Performance
~~~~~~~~~~~
diff --git a/pandas/core/format.py b/pandas/core/format.py
index f452ee11ae84f..04413970440b9 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -947,7 +947,8 @@ def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None,
cols=None, header=True, index=True, index_label=None,
mode='w', nanRep=None, encoding=None, quoting=None,
line_terminator='\n', chunksize=None, engine=None,
- tupleize_cols=False, quotechar='"', date_format=None):
+ tupleize_cols=False, quotechar='"', date_format=None,
+ doublequote=True, escapechar=None):
self.engine = engine # remove for 0.13
self.obj = obj
@@ -972,6 +973,9 @@ def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None,
quotechar = None
self.quotechar = quotechar
+ self.doublequote = doublequote
+ self.escapechar = escapechar
+
self.line_terminator = line_terminator
self.date_format = date_format
@@ -1151,6 +1155,8 @@ def save(self):
try:
writer_kwargs = dict(lineterminator=self.line_terminator,
delimiter=self.sep, quoting=self.quoting,
+ doublequote=self.doublequote,
+ escapechar=self.escapechar,
quotechar=self.quotechar)
if self.encoding is not None:
writer_kwargs['encoding'] = self.encoding
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 9f9af187d21dd..e66e09624a04f 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1070,8 +1070,9 @@ def to_panel(self):
def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
cols=None, header=True, index=True, index_label=None,
mode='w', nanRep=None, encoding=None, quoting=None,
- line_terminator='\n', chunksize=None,
- tupleize_cols=False, date_format=None, **kwds):
+ quotechar='"', line_terminator='\n', chunksize=None,
+ tupleize_cols=False, date_format=None, doublequote=True,
+ escapechar=None, **kwds):
r"""Write DataFrame to a comma-separated values (csv) file
Parameters
@@ -1109,13 +1110,19 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
file
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL
+ quotechar : string (length 1), default '"'
+ character used to quote fields
+ doublequote : boolean, default True
+ Control quoting of `quotechar` inside a field
+ escapechar : string (length 1), default None
+ character used to escape `sep` and `quotechar` when appropriate
chunksize : int or None
rows to write at a time
tupleize_cols : boolean, default False
write multi_index columns as a list of tuples (if True)
or new (expanded format) if False)
date_format : string, default None
- Format string for datetime objects.
+ Format string for datetime objects
"""
if nanRep is not None: # pragma: no cover
warnings.warn("nanRep is deprecated, use na_rep",
@@ -1129,10 +1136,12 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
float_format=float_format, cols=cols,
header=header, index=index,
index_label=index_label, mode=mode,
- chunksize=chunksize, engine=kwds.get(
- "engine"),
+ chunksize=chunksize, quotechar=quotechar,
+ engine=kwds.get("engine"),
tupleize_cols=tupleize_cols,
- date_format=date_format)
+ date_format=date_format,
+ doublequote=doublequote,
+ escapechar=escapechar)
formatter.save()
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 6b0d56b5c383e..e7d9145aa9d68 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -46,7 +46,8 @@
Default (None) results in QUOTE_MINIMAL behavior.
skipinitialspace : boolean, default False
Skip spaces after delimiter
-escapechar : string
+escapechar : string (length 1), default None
+ One-character string used to escape delimiter when quoting is QUOTE_NONE.
dtype : Type name or dict of column -> type
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
compression : {'gzip', 'bz2', None}, default None
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index a0fd992b3a532..ac42266b3c4eb 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -1669,10 +1669,10 @@ def test_to_latex(self):
\end{tabular}
"""
self.assertEqual(withoutindex_result, withoutindex_expected)
-
+
def test_to_latex_escape_special_chars(self):
special_characters = ['&','%','$','#','_',
- '{','}','~','^','\\']
+ '{','}','~','^','\\']
df = DataFrame(data=special_characters)
observed = df.to_latex()
expected = r"""\begin{tabular}{ll}
@@ -1694,6 +1694,99 @@ def test_to_latex_escape_special_chars(self):
"""
self.assertEqual(observed, expected)
+ def test_to_csv_quotechar(self):
+ df = DataFrame({'col' : [1,2]})
+ expected = """\
+"","col"
+"0","1"
+"1","2"
+"""
+ with tm.ensure_clean('test.csv') as path:
+ df.to_csv(path, quoting=1) # 1=QUOTE_ALL
+ with open(path, 'r') as f:
+ self.assertEqual(f.read(), expected)
+ with tm.ensure_clean('test.csv') as path:
+ df.to_csv(path, quoting=1, engine='python')
+ with open(path, 'r') as f:
+ self.assertEqual(f.read(), expected)
+
+ expected = """\
+$$,$col$
+$0$,$1$
+$1$,$2$
+"""
+ with tm.ensure_clean('test.csv') as path:
+ df.to_csv(path, quoting=1, quotechar="$")
+ with open(path, 'r') as f:
+ self.assertEqual(f.read(), expected)
+ with tm.ensure_clean('test.csv') as path:
+ df.to_csv(path, quoting=1, quotechar="$", engine='python')
+ with open(path, 'r') as f:
+ self.assertEqual(f.read(), expected)
+
+ with tm.ensure_clean('test.csv') as path:
+ with tm.assertRaisesRegexp(TypeError, 'quotechar'):
+ df.to_csv(path, quoting=1, quotechar=None)
+ with tm.ensure_clean('test.csv') as path:
+ with tm.assertRaisesRegexp(TypeError, 'quotechar'):
+ df.to_csv(path, quoting=1, quotechar=None, engine='python')
+
+ def test_to_csv_doublequote(self):
+ df = DataFrame({'col' : ['a"a', '"bb"']})
+ expected = '''\
+"","col"
+"0","a""a"
+"1","""bb"""
+'''
+ with tm.ensure_clean('test.csv') as path:
+ df.to_csv(path, quoting=1, doublequote=True) # QUOTE_ALL
+ with open(path, 'r') as f:
+ self.assertEqual(f.read(), expected)
+ with tm.ensure_clean('test.csv') as path:
+ df.to_csv(path, quoting=1, doublequote=True, engine='python')
+ with open(path, 'r') as f:
+ self.assertEqual(f.read(), expected)
+
+ from _csv import Error
+ with tm.ensure_clean('test.csv') as path:
+ with tm.assertRaisesRegexp(Error, 'escapechar'):
+ df.to_csv(path, doublequote=False) # no escapechar set
+ with tm.ensure_clean('test.csv') as path:
+ with tm.assertRaisesRegexp(Error, 'escapechar'):
+ df.to_csv(path, doublequote=False, engine='python')
+
+ def test_to_csv_escapechar(self):
+ df = DataFrame({'col' : ['a"a', '"bb"']})
+ expected = """\
+"","col"
+"0","a\\"a"
+"1","\\"bb\\""
+"""
+ with tm.ensure_clean('test.csv') as path: # QUOTE_ALL
+ df.to_csv(path, quoting=1, doublequote=False, escapechar='\\')
+ with open(path, 'r') as f:
+ self.assertEqual(f.read(), expected)
+ with tm.ensure_clean('test.csv') as path:
+ df.to_csv(path, quoting=1, doublequote=False, escapechar='\\',
+ engine='python')
+ with open(path, 'r') as f:
+ self.assertEqual(f.read(), expected)
+
+ df = DataFrame({'col' : ['a,a', ',bb,']})
+ expected = """\
+,col
+0,a\\,a
+1,\\,bb\\,
+"""
+ with tm.ensure_clean('test.csv') as path:
+ df.to_csv(path, quoting=3, escapechar='\\') # QUOTE_NONE
+ with open(path, 'r') as f:
+ self.assertEqual(f.read(), expected)
+ with tm.ensure_clean('test.csv') as path:
+ df.to_csv(path, quoting=3, escapechar='\\', engine='python')
+ with open(path, 'r') as f:
+ self.assertEqual(f.read(), expected)
+
class TestSeriesFormatting(tm.TestCase):
_multiprocess_can_split_ = True
| Add support for passing remaining `csv.writer` formatting parameters thru to `DataFrame.to_csv()`.
Maybe write tests for this? That much is over my head currently.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5414 | 2013-11-02T01:32:10Z | 2014-02-17T14:19:06Z | 2014-02-17T14:19:06Z | 2014-06-14T17:02:45Z |
BLD: numpy 1.8 on the 3.3/2.7 builds | diff --git a/ci/requirements-2.7.txt b/ci/requirements-2.7.txt
index 3b786152cd653..705aa9e3cc922 100644
--- a/ci/requirements-2.7.txt
+++ b/ci/requirements-2.7.txt
@@ -1,7 +1,7 @@
python-dateutil==2.1
pytz==2013b
xlwt==0.7.5
-numpy==1.7.1
+numpy==1.8.0
cython==0.19.1
bottleneck==0.6.0
numexpr==2.1
diff --git a/ci/requirements-3.2.txt b/ci/requirements-3.2.txt
index b44a708c4fffc..0f3bdcbac38cb 100644
--- a/ci/requirements-3.2.txt
+++ b/ci/requirements-3.2.txt
@@ -3,7 +3,7 @@ pytz==2013b
openpyxl==1.6.2
xlsxwriter==0.4.3
xlrd==0.9.2
-numpy==1.6.2
+numpy==1.7.1
cython==0.19.1
numexpr==2.1
tables==3.0.0
diff --git a/ci/requirements-3.3.txt b/ci/requirements-3.3.txt
index 94a77bbc06024..3ca888d1623e3 100644
--- a/ci/requirements-3.3.txt
+++ b/ci/requirements-3.3.txt
@@ -4,7 +4,7 @@ openpyxl==1.6.2
xlsxwriter==0.4.3
xlrd==0.9.2
html5lib==1.0b2
-numpy==1.7.1
+numpy==1.8.0
cython==0.19.1
numexpr==2.1
tables==3.0.0
| close #5412
still pretty good coverage of all numpys
build take a tad longer w/o the wheels
| https://api.github.com/repos/pandas-dev/pandas/pulls/5413 | 2013-11-01T22:51:05Z | 2013-11-02T23:54:38Z | 2013-11-02T23:54:38Z | 2014-06-23T21:43:09Z |
BLD: dateutil-2.2 patch | diff --git a/ci/requirements-3.3.txt b/ci/requirements-3.3.txt
index 318030e733158..94a77bbc06024 100644
--- a/ci/requirements-3.3.txt
+++ b/ci/requirements-3.3.txt
@@ -1,4 +1,4 @@
-python-dateutil==2.1
+python-dateutil==2.2
pytz==2013b
openpyxl==1.6.2
xlsxwriter==0.4.3
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 3d8803237931d..af1a31bcec311 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -305,6 +305,10 @@ def dateutil_parse(timestr, default,
res = DEFAULTPARSER._parse(fobj, **kwargs)
+ # dateutil 2.2 compat
+ if isinstance(res, tuple):
+ res, _ = res
+
if res is None:
raise ValueError("unknown string format")
| close #5409
| https://api.github.com/repos/pandas-dev/pandas/pulls/5411 | 2013-11-01T22:13:26Z | 2013-11-01T22:13:32Z | 2013-11-01T22:13:32Z | 2014-07-16T08:38:35Z |
DOC/BUG: fix details for 'quoting' parser parameter | diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 52d4e6bbac50a..796475aa24a58 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -36,13 +36,13 @@
%s
lineterminator : string (length 1), default None
Character to break file into lines. Only valid with C parser
-quotechar : string
- The character to used to denote the start and end of a quoted item. Quoted
+quotechar : string (length 1)
+ The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
-quoting : int
- Controls whether quotes should be recognized. Values are taken from
- `csv.QUOTE_*` values. Acceptable values are 0, 1, 2, and 3 for
- QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONE, and QUOTE_NONNUMERIC, respectively.
+quoting : int or csv.QUOTE_* instance, default None
+ Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
+ QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
+ Default (None) results in QUOTE_MINIMAL behavior.
skipinitialspace : boolean, default False
Skip spaces after delimiter
escapechar : string
| ``` python
Python 2.7.5 (default, May 15 2013, 22:44:16) [MSC v.1500 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license" for more information.
Imported NumPy 1.7.1, SciPy 0.12.0, Matplotlib 1.3.0 + guidata 1.6.1, guiqwt 2.3.1
Type "scientific" for more details.
>>> from csv import QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE
>>> QUOTE_MINIMAL
0
>>> QUOTE_ALL
1
>>> QUOTE_NONNUMERIC
2
>>> QUOTE_NONE
3
>>>
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5408 | 2013-11-01T21:00:49Z | 2014-01-21T11:46:04Z | 2014-01-21T11:46:04Z | 2014-06-19T13:22:40Z |
Fix Issues with FY5253 and nearest w/ year end in Dec | diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 8830d66b245ef..b763fc6b11252 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -977,467 +977,154 @@ def onOffset(self, dt):
modMonth = (dt.month - self.startingMonth) % 3
return BMonthEnd().onOffset(dt) and modMonth == 0
-class FY5253(CacheableOffset, DateOffset):
- """
- Describes 52-53 week fiscal year. This is also known as a 4-4-5 calendar.
- It is used by companies that desire that their
- fiscal year always end on the same day of the week.
+_int_to_month = {
+ 1: 'JAN',
+ 2: 'FEB',
+ 3: 'MAR',
+ 4: 'APR',
+ 5: 'MAY',
+ 6: 'JUN',
+ 7: 'JUL',
+ 8: 'AUG',
+ 9: 'SEP',
+ 10: 'OCT',
+ 11: 'NOV',
+ 12: 'DEC'
+}
- It is a method of managing accounting periods.
- It is a common calendar structure for some industries,
- such as retail, manufacturing and parking industry.
+_month_to_int = dict((v, k) for k, v in _int_to_month.items())
- For more information see:
- http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
+# TODO: This is basically the same as BQuarterEnd
+class BQuarterBegin(CacheableOffset, QuarterOffset):
+ _outputName = "BusinessQuarterBegin"
+ # I suspect this is wrong for *all* of them.
+ _default_startingMonth = 3
+ _from_name_startingMonth = 1
+ _prefix = 'BQS'
- The year may either:
- - end on the last X day of the Y month.
- - end on the last X day closest to the last day of the Y month.
+ def apply(self, other):
+ n = self.n
+ other = as_datetime(other)
- X is a specific day of the week.
- Y is a certain month of the year
+ wkday, _ = tslib.monthrange(other.year, other.month)
- Parameters
- ----------
- n : int
- weekday : {0, 1, ..., 6}
- 0: Mondays
- 1: Tuesdays
- 2: Wednesdays
- 3: Thursdays
- 4: Fridays
- 5: Saturdays
- 6: Sundays
- startingMonth : The month in which fiscal years end. {1, 2, ... 12}
- variation : str
- {"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
- """
+ first = _get_firstbday(wkday)
- _prefix = 'RE'
- _suffix_prefix_last = 'L'
- _suffix_prefix_nearest = 'N'
+ monthsSince = (other.month - self.startingMonth) % 3
- def __init__(self, n=1, **kwds):
- self.n = n
- self.startingMonth = kwds['startingMonth']
- self.weekday = kwds["weekday"]
+ if n <= 0 and monthsSince != 0: # make sure to roll forward so negate
+ monthsSince = monthsSince - 3
- self.variation = kwds["variation"]
+ # roll forward if on same month later than first bday
+ if n <= 0 and (monthsSince == 0 and other.day > first):
+ n = n + 1
+ # pretend to roll back if on same month but before firstbday
+ elif n > 0 and (monthsSince == 0 and other.day < first):
+ n = n - 1
- self.kwds = kwds
+ # get the first bday for result
+ other = other + relativedelta(months=3 * n - monthsSince)
+ wkday, _ = tslib.monthrange(other.year, other.month)
+ first = _get_firstbday(wkday)
+ result = datetime(other.year, other.month, first,
+ other.hour, other.minute, other.second,
+ other.microsecond)
+ return as_timestamp(result)
- if self.n == 0:
- raise ValueError('N cannot be 0')
- if self.variation not in ["nearest", "last"]:
- raise ValueError('%s is not a valid variation' % self.variation)
+class QuarterEnd(CacheableOffset, QuarterOffset):
+ """DateOffset increments between business Quarter dates
+ startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
+ startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
+ startingMonth = 3 corresponds to dates like 3/31/2007, 6/30/2007, ...
+ """
+ _outputName = 'QuarterEnd'
+ _default_startingMonth = 3
+ _prefix = 'Q'
- if self.variation == "nearest":
- self._rd_forward = relativedelta(weekday=weekday(self.weekday))
- self._rd_backward = relativedelta(weekday=weekday(self.weekday)(-1))
- else:
- self._offset_lwom = LastWeekOfMonth(n=1, weekday=self.weekday)
+ def __init__(self, n=1, **kwds):
+ self.n = n
+ self.startingMonth = kwds.get('startingMonth', 3)
- def isAnchored(self):
- return self.n == 1 \
- and self.startingMonth is not None \
- and self.weekday is not None
+ self.kwds = kwds
- def onOffset(self, dt):
- year_end = self.get_year_end(dt)
- return year_end == dt
+ def isAnchored(self):
+ return (self.n == 1 and self.startingMonth is not None)
def apply(self, other):
n = self.n
- if n > 0:
- year_end = self.get_year_end(other)
- if other < year_end:
- other = year_end
- n -= 1
- elif other > year_end:
- other = self.get_year_end(as_datetime(other) + relativedelta(years=1))
- n -= 1
-
- return self.get_year_end(as_datetime(other) + relativedelta(years=n))
- else:
- n = -n
- year_end = self.get_year_end(other)
- if other > year_end:
- other = year_end
- n -= 1
- elif other < year_end:
- other = self.get_year_end(as_datetime(other) + relativedelta(years=-1))
- n -= 1
+ other = as_datetime(other)
- return self.get_year_end(as_datetime(other) + relativedelta(years=-n))
+ wkday, days_in_month = tslib.monthrange(other.year, other.month)
- def get_year_end(self, dt):
- if self.variation == "nearest":
- return self._get_year_end_nearest(dt)
- else:
- return self._get_year_end_last(dt)
+ monthsToGo = 3 - ((other.month - self.startingMonth) % 3)
+ if monthsToGo == 3:
+ monthsToGo = 0
- def get_target_month_end(self, dt):
- target_month = datetime(year=dt.year, month=self.startingMonth, day=1)
- next_month_first_of = target_month + relativedelta(months=+1)
- return next_month_first_of + relativedelta(days=-1)
+ if n > 0 and not (other.day >= days_in_month and monthsToGo == 0):
+ n = n - 1
- def _get_year_end_nearest(self, dt):
- target_date = self.get_target_month_end(dt)
- if target_date.weekday() == self.weekday:
- return target_date
- else:
- forward = target_date + self._rd_forward
- backward = target_date + self._rd_backward
+ other = other + relativedelta(months=monthsToGo + 3 * n, day=31)
- if forward - target_date < target_date - backward:
- return forward
- else:
- return backward
+ return as_timestamp(other)
- def _get_year_end_last(self, dt):
- current_year = datetime(year=dt.year, month=self.startingMonth, day=1)
- return current_year + self._offset_lwom
+ def onOffset(self, dt):
+ modMonth = (dt.month - self.startingMonth) % 3
+ return MonthEnd().onOffset(dt) and modMonth == 0
- @property
- def rule_code(self):
- suffix = self.get_rule_code_suffix()
- return "%s-%s" % (self._get_prefix(), suffix)
- def _get_prefix(self):
- return self._prefix
+class QuarterBegin(CacheableOffset, QuarterOffset):
+ _outputName = 'QuarterBegin'
+ _default_startingMonth = 3
+ _from_name_startingMonth = 1
+ _prefix = 'QS'
- def _get_suffix_prefix(self):
- if self.variation == "nearest":
- return self._suffix_prefix_nearest
- else:
- return self._suffix_prefix_last
+ def isAnchored(self):
+ return (self.n == 1 and self.startingMonth is not None)
- def get_rule_code_suffix(self):
- return '%s-%s-%s' % (self._get_suffix_prefix(), \
- _int_to_month[self.startingMonth], \
- _int_to_weekday[self.weekday])
+ def apply(self, other):
+ n = self.n
+ other = as_datetime(other)
- @classmethod
- def _parse_suffix(cls, varion_code, startingMonth_code, weekday_code):
- if varion_code == "N":
- variation = "nearest"
- elif varion_code == "L":
- variation = "last"
- else:
- raise ValueError("Unable to parse varion_code: %s" % (varion_code,))
+ wkday, days_in_month = tslib.monthrange(other.year, other.month)
- startingMonth = _month_to_int[startingMonth_code]
- weekday = _weekday_to_int[weekday_code]
+ monthsSince = (other.month - self.startingMonth) % 3
- return {
- "weekday":weekday,
- "startingMonth":startingMonth,
- "variation":variation,
- }
+ if n <= 0 and monthsSince != 0:
+ # make sure you roll forward, so negate
+ monthsSince = monthsSince - 3
- @classmethod
- def _from_name(cls, *args):
- return cls(**cls._parse_suffix(*args))
+ if n < 0 and (monthsSince == 0 and other.day > 1):
+ # after start, so come back an extra period as if rolled forward
+ n = n + 1
-class FY5253Quarter(CacheableOffset, DateOffset):
- """
- DateOffset increments between business quarter dates
- for 52-53 week fiscal year (also known as a 4-4-5 calendar).
+ other = other + relativedelta(months=3 * n - monthsSince, day=1)
+ return as_timestamp(other)
- It is used by companies that desire that their
- fiscal year always end on the same day of the week.
- It is a method of managing accounting periods.
- It is a common calendar structure for some industries,
- such as retail, manufacturing and parking industry.
+class YearOffset(DateOffset):
+ """DateOffset that just needs a month"""
- For more information see:
- http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
+ def __init__(self, n=1, **kwds):
+ self.month = kwds.get('month', self._default_month)
- The year may either:
- - end on the last X day of the Y month.
- - end on the last X day closest to the last day of the Y month.
+ if self.month < 1 or self.month > 12:
+ raise ValueError('Month must go from 1 to 12')
- X is a specific day of the week.
- Y is a certain month of the year
+ DateOffset.__init__(self, n=n, **kwds)
- startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
- startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
- startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
+ @classmethod
+ def _from_name(cls, suffix=None):
+ kwargs = {}
+ if suffix:
+ kwargs['month'] = _month_to_int[suffix]
+ return cls(**kwargs)
- Parameters
- ----------
- n : int
- weekday : {0, 1, ..., 6}
- 0: Mondays
- 1: Tuesdays
- 2: Wednesdays
- 3: Thursdays
- 4: Fridays
- 5: Saturdays
- 6: Sundays
- startingMonth : The month in which fiscal years end. {1, 2, ... 12}
- qtr_with_extra_week : The quarter number that has the leap
- or 14 week when needed. {1,2,3,4}
- variation : str
- {"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
- """
-
- _prefix = 'REQ'
-
- def __init__(self, n=1, **kwds):
- self.n = n
-
- self.qtr_with_extra_week = kwds["qtr_with_extra_week"]
-
- self.kwds = kwds
-
- if self.n == 0:
- raise ValueError('N cannot be 0')
-
- self._offset = FY5253( \
- startingMonth=kwds['startingMonth'], \
- weekday=kwds["weekday"],
- variation=kwds["variation"])
-
- def isAnchored(self):
- return self.n == 1 and self._offset.isAnchored()
-
- def apply(self, other):
- other = as_datetime(other)
- n = self.n
-
- if n > 0:
- while n > 0:
- if not self._offset.onOffset(other):
- qtr_lens = self.get_weeks(other)
- start = other - self._offset
- else:
- start = other
- qtr_lens = self.get_weeks(other + self._offset)
-
- for weeks in qtr_lens:
- start += relativedelta(weeks=weeks)
- if start > other:
- other = start
- n -= 1
- break
-
- else:
- n = -n
- while n > 0:
- if not self._offset.onOffset(other):
- qtr_lens = self.get_weeks(other)
- end = other + self._offset
- else:
- end = other
- qtr_lens = self.get_weeks(other)
-
- for weeks in reversed(qtr_lens):
- end -= relativedelta(weeks=weeks)
- if end < other:
- other = end
- n -= 1
- break
-
- return other
-
- def get_weeks(self, dt):
- ret = [13] * 4
-
- year_has_extra_week = self.year_has_extra_week(dt)
-
- if year_has_extra_week:
- ret[self.qtr_with_extra_week-1] = 14
-
- return ret
-
- def year_has_extra_week(self, dt):
- if self._offset.onOffset(dt):
- prev_year_end = dt - self._offset
- next_year_end = dt
- else:
- next_year_end = dt + self._offset
- prev_year_end = dt - self._offset
-
- week_in_year = (next_year_end - prev_year_end).days/7
-
- return week_in_year == 53
-
- def onOffset(self, dt):
- if self._offset.onOffset(dt):
- return True
-
- next_year_end = dt - self._offset
-
- qtr_lens = self.get_weeks(dt)
-
- current = next_year_end
- for qtr_len in qtr_lens[0:4]:
- current += relativedelta(weeks=qtr_len)
- if dt == current:
- return True
- return False
-
- @property
- def rule_code(self):
- suffix = self._offset.get_rule_code_suffix()
- return "%s-%s" %(self._prefix, "%s-%d" % (suffix, self.qtr_with_extra_week))
-
- @classmethod
- def _from_name(cls, *args):
- return cls(**dict(FY5253._parse_suffix(*args[:-1]), qtr_with_extra_week=int(args[-1])))
-
-_int_to_month = {
- 1: 'JAN',
- 2: 'FEB',
- 3: 'MAR',
- 4: 'APR',
- 5: 'MAY',
- 6: 'JUN',
- 7: 'JUL',
- 8: 'AUG',
- 9: 'SEP',
- 10: 'OCT',
- 11: 'NOV',
- 12: 'DEC'
-}
-
-_month_to_int = dict((v, k) for k, v in _int_to_month.items())
-
-
-# TODO: This is basically the same as BQuarterEnd
-class BQuarterBegin(CacheableOffset, QuarterOffset):
- _outputName = "BusinessQuarterBegin"
- # I suspect this is wrong for *all* of them.
- _default_startingMonth = 3
- _from_name_startingMonth = 1
- _prefix = 'BQS'
-
- def apply(self, other):
- n = self.n
- other = as_datetime(other)
-
- wkday, _ = tslib.monthrange(other.year, other.month)
-
- first = _get_firstbday(wkday)
-
- monthsSince = (other.month - self.startingMonth) % 3
-
- if n <= 0 and monthsSince != 0: # make sure to roll forward so negate
- monthsSince = monthsSince - 3
-
- # roll forward if on same month later than first bday
- if n <= 0 and (monthsSince == 0 and other.day > first):
- n = n + 1
- # pretend to roll back if on same month but before firstbday
- elif n > 0 and (monthsSince == 0 and other.day < first):
- n = n - 1
-
- # get the first bday for result
- other = other + relativedelta(months=3 * n - monthsSince)
- wkday, _ = tslib.monthrange(other.year, other.month)
- first = _get_firstbday(wkday)
- result = datetime(other.year, other.month, first,
- other.hour, other.minute, other.second,
- other.microsecond)
- return as_timestamp(result)
-
-
-class QuarterEnd(CacheableOffset, QuarterOffset):
- """DateOffset increments between business Quarter dates
- startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
- startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
- startingMonth = 3 corresponds to dates like 3/31/2007, 6/30/2007, ...
- """
- _outputName = 'QuarterEnd'
- _default_startingMonth = 3
- _prefix = 'Q'
-
- def __init__(self, n=1, **kwds):
- self.n = n
- self.startingMonth = kwds.get('startingMonth', 3)
-
- self.kwds = kwds
-
- def isAnchored(self):
- return (self.n == 1 and self.startingMonth is not None)
-
- def apply(self, other):
- n = self.n
- other = as_datetime(other)
-
- wkday, days_in_month = tslib.monthrange(other.year, other.month)
-
- monthsToGo = 3 - ((other.month - self.startingMonth) % 3)
- if monthsToGo == 3:
- monthsToGo = 0
-
- if n > 0 and not (other.day >= days_in_month and monthsToGo == 0):
- n = n - 1
-
- other = other + relativedelta(months=monthsToGo + 3 * n, day=31)
-
- return as_timestamp(other)
-
- def onOffset(self, dt):
- modMonth = (dt.month - self.startingMonth) % 3
- return MonthEnd().onOffset(dt) and modMonth == 0
-
-
-class QuarterBegin(CacheableOffset, QuarterOffset):
- _outputName = 'QuarterBegin'
- _default_startingMonth = 3
- _from_name_startingMonth = 1
- _prefix = 'QS'
-
- def isAnchored(self):
- return (self.n == 1 and self.startingMonth is not None)
-
- def apply(self, other):
- n = self.n
- other = as_datetime(other)
-
- wkday, days_in_month = tslib.monthrange(other.year, other.month)
-
- monthsSince = (other.month - self.startingMonth) % 3
-
- if n <= 0 and monthsSince != 0:
- # make sure you roll forward, so negate
- monthsSince = monthsSince - 3
-
- if n < 0 and (monthsSince == 0 and other.day > 1):
- # after start, so come back an extra period as if rolled forward
- n = n + 1
-
- other = other + relativedelta(months=3 * n - monthsSince, day=1)
- return as_timestamp(other)
-
-
-class YearOffset(DateOffset):
- """DateOffset that just needs a month"""
-
- def __init__(self, n=1, **kwds):
- self.month = kwds.get('month', self._default_month)
-
- if self.month < 1 or self.month > 12:
- raise ValueError('Month must go from 1 to 12')
-
- DateOffset.__init__(self, n=n, **kwds)
-
- @classmethod
- def _from_name(cls, suffix=None):
- kwargs = {}
- if suffix:
- kwargs['month'] = _month_to_int[suffix]
- return cls(**kwargs)
-
- @property
- def rule_code(self):
- return '%s-%s' % (self._prefix, _int_to_month[self.month])
+ @property
+ def rule_code(self):
+ return '%s-%s' % (self._prefix, _int_to_month[self.month])
class BYearEnd(CacheableOffset, YearOffset):
@@ -1611,6 +1298,359 @@ def onOffset(self, dt):
return dt.month == self.month and dt.day == 1
+class FY5253(CacheableOffset, DateOffset):
+ """
+ Describes 52-53 week fiscal year. This is also known as a 4-4-5 calendar.
+
+ It is used by companies that desire that their
+ fiscal year always end on the same day of the week.
+
+ It is a method of managing accounting periods.
+ It is a common calendar structure for some industries,
+ such as retail, manufacturing and parking industry.
+
+ For more information see:
+ http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
+
+
+ The year may either:
+ - end on the last X day of the Y month.
+ - end on the last X day closest to the last day of the Y month.
+
+ X is a specific day of the week.
+ Y is a certain month of the year
+
+ Parameters
+ ----------
+ n : int
+ weekday : {0, 1, ..., 6}
+ 0: Mondays
+ 1: Tuesdays
+ 2: Wednesdays
+ 3: Thursdays
+ 4: Fridays
+ 5: Saturdays
+ 6: Sundays
+ startingMonth : The month in which fiscal years end. {1, 2, ... 12}
+ variation : str
+ {"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
+ """
+
+ _prefix = 'RE'
+ _suffix_prefix_last = 'L'
+ _suffix_prefix_nearest = 'N'
+
+ def __init__(self, n=1, **kwds):
+ self.n = n
+ self.startingMonth = kwds['startingMonth']
+ self.weekday = kwds["weekday"]
+
+ self.variation = kwds["variation"]
+
+ self.kwds = kwds
+
+ if self.n == 0:
+ raise ValueError('N cannot be 0')
+
+ if self.variation not in ["nearest", "last"]:
+ raise ValueError('%s is not a valid variation' % self.variation)
+
+ if self.variation == "nearest":
+ weekday_offset = weekday(self.weekday)
+ self._rd_forward = relativedelta(weekday=weekday_offset)
+ self._rd_backward = relativedelta(weekday=weekday_offset(-1))
+ else:
+ self._offset_lwom = LastWeekOfMonth(n=1, weekday=self.weekday)
+
+ def isAnchored(self):
+ return self.n == 1 \
+ and self.startingMonth is not None \
+ and self.weekday is not None
+
+ def onOffset(self, dt):
+ year_end = self.get_year_end(dt)
+
+ if self.variation == "nearest":
+ # We have to check the year end of "this" cal year AND the previous
+ return year_end == dt or \
+ self.get_year_end(dt - relativedelta(months=1)) == dt
+ else:
+ return year_end == dt
+
+ def apply(self, other):
+ n = self.n
+ prev_year = self.get_year_end(
+ datetime(other.year - 1, self.startingMonth, 1))
+ cur_year = self.get_year_end(
+ datetime(other.year, self.startingMonth, 1))
+ next_year = self.get_year_end(
+ datetime(other.year + 1, self.startingMonth, 1))
+
+ if n > 0:
+ if other == prev_year:
+ year = other.year - 1
+ elif other == cur_year:
+ year = other.year
+ elif other == next_year:
+ year = other.year + 1
+ elif other < prev_year:
+ year = other.year - 1
+ n -= 1
+ elif other < cur_year:
+ year = other.year
+ n -= 1
+ elif other < next_year:
+ year = other.year + 1
+ n -= 1
+ else:
+ assert False
+
+ return self.get_year_end(datetime(year + n, self.startingMonth, 1))
+ else:
+ n = -n
+ if other == prev_year:
+ year = other.year - 1
+ elif other == cur_year:
+ year = other.year
+ elif other == next_year:
+ year = other.year + 1
+ elif other > next_year:
+ year = other.year + 1
+ n -= 1
+ elif other > cur_year:
+ year = other.year
+ n -= 1
+ elif other > prev_year:
+ year = other.year - 1
+ n -= 1
+ else:
+ assert False
+
+ return self.get_year_end(datetime(year - n, self.startingMonth, 1))
+
+ def get_year_end(self, dt):
+ if self.variation == "nearest":
+ return self._get_year_end_nearest(dt)
+ else:
+ return self._get_year_end_last(dt)
+
+ def get_target_month_end(self, dt):
+ target_month = datetime(year=dt.year, month=self.startingMonth, day=1)
+ next_month_first_of = target_month + relativedelta(months=+1)
+ return next_month_first_of + relativedelta(days=-1)
+
+ def _get_year_end_nearest(self, dt):
+ target_date = self.get_target_month_end(dt)
+ if target_date.weekday() == self.weekday:
+ return target_date
+ else:
+ forward = target_date + self._rd_forward
+ backward = target_date + self._rd_backward
+
+ if forward - target_date < target_date - backward:
+ return forward
+ else:
+ return backward
+
+ def _get_year_end_last(self, dt):
+ current_year = datetime(year=dt.year, month=self.startingMonth, day=1)
+ return current_year + self._offset_lwom
+
+ @property
+ def rule_code(self):
+ suffix = self.get_rule_code_suffix()
+ return "%s-%s" % (self._get_prefix(), suffix)
+
+ def _get_prefix(self):
+ return self._prefix
+
+ def _get_suffix_prefix(self):
+ if self.variation == "nearest":
+ return self._suffix_prefix_nearest
+ else:
+ return self._suffix_prefix_last
+
+ def get_rule_code_suffix(self):
+ return '%s-%s-%s' % (self._get_suffix_prefix(), \
+ _int_to_month[self.startingMonth], \
+ _int_to_weekday[self.weekday])
+
+ @classmethod
+ def _parse_suffix(cls, varion_code, startingMonth_code, weekday_code):
+ if varion_code == "N":
+ variation = "nearest"
+ elif varion_code == "L":
+ variation = "last"
+ else:
+ raise ValueError(
+ "Unable to parse varion_code: %s" % (varion_code,))
+
+ startingMonth = _month_to_int[startingMonth_code]
+ weekday = _weekday_to_int[weekday_code]
+
+ return {
+ "weekday": weekday,
+ "startingMonth": startingMonth,
+ "variation": variation,
+ }
+
+ @classmethod
+ def _from_name(cls, *args):
+ return cls(**cls._parse_suffix(*args))
+
+
+class FY5253Quarter(CacheableOffset, DateOffset):
+ """
+ DateOffset increments between business quarter dates
+ for 52-53 week fiscal year (also known as a 4-4-5 calendar).
+
+ It is used by companies that desire that their
+ fiscal year always end on the same day of the week.
+
+ It is a method of managing accounting periods.
+ It is a common calendar structure for some industries,
+ such as retail, manufacturing and parking industry.
+
+ For more information see:
+ http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
+
+ The year may either:
+ - end on the last X day of the Y month.
+ - end on the last X day closest to the last day of the Y month.
+
+ X is a specific day of the week.
+ Y is a certain month of the year
+
+ startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
+ startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
+ startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
+
+ Parameters
+ ----------
+ n : int
+ weekday : {0, 1, ..., 6}
+ 0: Mondays
+ 1: Tuesdays
+ 2: Wednesdays
+ 3: Thursdays
+ 4: Fridays
+ 5: Saturdays
+ 6: Sundays
+ startingMonth : The month in which fiscal years end. {1, 2, ... 12}
+ qtr_with_extra_week : The quarter number that has the leap
+ or 14 week when needed. {1,2,3,4}
+ variation : str
+ {"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
+ """
+
+ _prefix = 'REQ'
+
+ def __init__(self, n=1, **kwds):
+ self.n = n
+
+ self.qtr_with_extra_week = kwds["qtr_with_extra_week"]
+
+ self.kwds = kwds
+
+ if self.n == 0:
+ raise ValueError('N cannot be 0')
+
+ self._offset = FY5253( \
+ startingMonth=kwds['startingMonth'], \
+ weekday=kwds["weekday"],
+ variation=kwds["variation"])
+
+ def isAnchored(self):
+ return self.n == 1 and self._offset.isAnchored()
+
+ def apply(self, other):
+ other = as_datetime(other)
+ n = self.n
+
+ if n > 0:
+ while n > 0:
+ if not self._offset.onOffset(other):
+ qtr_lens = self.get_weeks(other)
+ start = other - self._offset
+ else:
+ start = other
+ qtr_lens = self.get_weeks(other + self._offset)
+
+ for weeks in qtr_lens:
+ start += relativedelta(weeks=weeks)
+ if start > other:
+ other = start
+ n -= 1
+ break
+
+ else:
+ n = -n
+ while n > 0:
+ if not self._offset.onOffset(other):
+ qtr_lens = self.get_weeks(other)
+ end = other + self._offset
+ else:
+ end = other
+ qtr_lens = self.get_weeks(other)
+
+ for weeks in reversed(qtr_lens):
+ end -= relativedelta(weeks=weeks)
+ if end < other:
+ other = end
+ n -= 1
+ break
+
+ return other
+
+ def get_weeks(self, dt):
+ ret = [13] * 4
+
+ year_has_extra_week = self.year_has_extra_week(dt)
+
+ if year_has_extra_week:
+ ret[self.qtr_with_extra_week - 1] = 14
+
+ return ret
+
+ def year_has_extra_week(self, dt):
+ if self._offset.onOffset(dt):
+ prev_year_end = dt - self._offset
+ next_year_end = dt
+ else:
+ next_year_end = dt + self._offset
+ prev_year_end = dt - self._offset
+
+ week_in_year = (next_year_end - prev_year_end).days / 7
+
+ return week_in_year == 53
+
+ def onOffset(self, dt):
+ if self._offset.onOffset(dt):
+ return True
+
+ next_year_end = dt - self._offset
+
+ qtr_lens = self.get_weeks(dt)
+
+ current = next_year_end
+ for qtr_len in qtr_lens[0:4]:
+ current += relativedelta(weeks=qtr_len)
+ if dt == current:
+ return True
+ return False
+
+ @property
+ def rule_code(self):
+ suffix = self._offset.get_rule_code_suffix()
+ return "%s-%s" % (self._prefix,
+ "%s-%d" % (suffix, self.qtr_with_extra_week))
+
+ @classmethod
+ def _from_name(cls, *args):
+ return cls(**dict(FY5253._parse_suffix(*args[:-1]),
+ qtr_with_extra_week=int(args[-1])))
+
+
#----------------------------------------------------------------------
# Ticks
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index f66f57cc45409..008bda0a676bf 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -1304,10 +1304,25 @@ def test_get_year_end(self):
self.assertEqual(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SUN).get_year_end(datetime(2013,1,1)), datetime(2013,9,1))
self.assertEqual(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.FRI).get_year_end(datetime(2013,1,1)), datetime(2013,8,30))
+ offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
+ variation="nearest")
+ self.assertEqual(offset_n.get_year_end(datetime(2012,1,1)), datetime(2013,1,1))
+ self.assertEqual(offset_n.get_year_end(datetime(2012,1,10)), datetime(2013,1,1))
+
+ self.assertEqual(offset_n.get_year_end(datetime(2013,1,1)), datetime(2013,12,31))
+ self.assertEqual(offset_n.get_year_end(datetime(2013,1,2)), datetime(2013,12,31))
+ self.assertEqual(offset_n.get_year_end(datetime(2013,1,3)), datetime(2013,12,31))
+ self.assertEqual(offset_n.get_year_end(datetime(2013,1,10)), datetime(2013,12,31))
+
+ JNJ = FY5253(n=1, startingMonth=12, weekday=6, variation="nearest")
+ self.assertEqual(JNJ.get_year_end(datetime(2006, 1, 1)), datetime(2006, 12, 31))
+
def test_onOffset(self):
offset_lom_aug_sat = makeFY5253NearestEndMonth(1, startingMonth=8, weekday=WeekDay.SAT)
offset_lom_aug_thu = makeFY5253NearestEndMonth(1, startingMonth=8, weekday=WeekDay.THU)
-
+ offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
+ variation="nearest")
+
tests = [
# From Wikipedia (see: http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar#Saturday_nearest_the_end_of_month)
# 2006-09-02 2006 September 2
@@ -1354,21 +1369,39 @@ def test_onOffset(self):
#From Micron, see: http://google.brand.edgar-online.com/?sym=MU&formtypeID=7
(offset_lom_aug_thu, datetime(2012, 8, 30), True),
(offset_lom_aug_thu, datetime(2011, 9, 1), True),
-
+
+ (offset_n, datetime(2012, 12, 31), False),
+ (offset_n, datetime(2013, 1, 1), True),
+ (offset_n, datetime(2013, 1, 2), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
- date_seq_nem_8_sat = [datetime(2006, 9, 2), datetime(2007, 9, 1), datetime(2008, 8, 30), datetime(2009, 8, 29), datetime(2010, 8, 28), datetime(2011, 9, 3)]
+ date_seq_nem_8_sat = [datetime(2006, 9, 2), datetime(2007, 9, 1),
+ datetime(2008, 8, 30), datetime(2009, 8, 29),
+ datetime(2010, 8, 28), datetime(2011, 9, 3)]
+
+ JNJ = [datetime(2005, 1, 2), datetime(2006, 1, 1),
+ datetime(2006, 12, 31), datetime(2007, 12, 30),
+ datetime(2008, 12, 28), datetime(2010, 1, 3),
+ datetime(2011, 1, 2), datetime(2012, 1, 1),
+ datetime(2012, 12, 30)]
+
+ DEC_SAT = FY5253(n=-1, startingMonth=12, weekday=5, variation="nearest")
tests = [
- (makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT), date_seq_nem_8_sat),
- (makeFY5253NearestEndMonth(n=1, startingMonth=8, weekday=WeekDay.SAT), date_seq_nem_8_sat),
- (makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT), [datetime(2006, 9, 1)] + date_seq_nem_8_sat),
- (makeFY5253NearestEndMonth(n=1, startingMonth=8, weekday=WeekDay.SAT), [datetime(2006, 9, 3)] + date_seq_nem_8_sat[1:]),
- (makeFY5253NearestEndMonth(n=-1, startingMonth=8, weekday=WeekDay.SAT), list(reversed(date_seq_nem_8_sat))),
+ (makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT), date_seq_nem_8_sat),
+ (makeFY5253NearestEndMonth(n=1, startingMonth=8, weekday=WeekDay.SAT), date_seq_nem_8_sat),
+ (makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT), [datetime(2006, 9, 1)] + date_seq_nem_8_sat),
+ (makeFY5253NearestEndMonth(n=1, startingMonth=8, weekday=WeekDay.SAT), [datetime(2006, 9, 3)] + date_seq_nem_8_sat[1:]),
+ (makeFY5253NearestEndMonth(n=-1, startingMonth=8, weekday=WeekDay.SAT), list(reversed(date_seq_nem_8_sat))),
+ (makeFY5253NearestEndMonth(n=1, startingMonth=12, weekday=WeekDay.SUN), JNJ),
+ (makeFY5253NearestEndMonth(n=-1, startingMonth=12, weekday=WeekDay.SUN), list(reversed(JNJ))),
+ (makeFY5253NearestEndMonth(n=1, startingMonth=12, weekday=WeekDay.SUN), [datetime(2005,1,2), datetime(2006, 1, 1)]),
+ (makeFY5253NearestEndMonth(n=1, startingMonth=12, weekday=WeekDay.SUN), [datetime(2006,1,2), datetime(2006, 12, 31)]),
+ (DEC_SAT, [datetime(2013,1,15), datetime(2012,12,29)])
]
for test in tests:
offset, data = test
@@ -1512,9 +1545,12 @@ def test_year_has_extra_week(self):
self.assertTrue(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(1994, 4, 2)))
def test_get_weeks(self):
- self.assertEqual(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).get_weeks(datetime(2011, 4, 2)), [14, 13, 13, 13])
- self.assertEqual(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=4).get_weeks(datetime(2011, 4, 2)), [13, 13, 13, 14])
- self.assertEqual(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).get_weeks(datetime(2010, 12, 25)), [13, 13, 13, 13])
+ sat_dec_1 = makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1)
+ sat_dec_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=4)
+
+ self.assertEqual(sat_dec_1.get_weeks(datetime(2011, 4, 2)), [14, 13, 13, 13])
+ self.assertEqual(sat_dec_4.get_weeks(datetime(2011, 4, 2)), [13, 13, 13, 14])
+ self.assertEqual(sat_dec_1.get_weeks(datetime(2010, 12, 25)), [13, 13, 13, 13])
class TestFY5253NearestEndMonthQuarter(TestBase):
@@ -1522,6 +1558,9 @@ def test_onOffset(self):
offset_nem_sat_aug_4 = makeFY5253NearestEndMonthQuarter(1, startingMonth=8, weekday=WeekDay.SAT, qtr_with_extra_week=4)
offset_nem_thu_aug_4 = makeFY5253NearestEndMonthQuarter(1, startingMonth=8, weekday=WeekDay.THU, qtr_with_extra_week=4)
+ offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
+ variation="nearest", qtr_with_extra_week=4)
+
tests = [
#From Wikipedia
(offset_nem_sat_aug_4, datetime(2006, 9, 2), True),
@@ -1563,6 +1602,9 @@ def test_onOffset(self):
(offset_nem_thu_aug_4, datetime(2007, 3, 1), True),
(offset_nem_thu_aug_4, datetime(1994, 3, 3), True),
+ (offset_n, datetime(2012, 12, 31), False),
+ (offset_n, datetime(2013, 1, 1), True),
+ (offset_n, datetime(2013, 1, 2), False)
]
for offset, date, expected in tests:
@@ -1580,7 +1622,12 @@ def test_offset(self):
assertEq(offset, datetime(2012, 5, 31), datetime(2012, 8, 30))
assertEq(offset, datetime(2012, 5, 30), datetime(2012, 5, 31))
-
+
+ offset2 = FY5253Quarter(weekday=5, startingMonth=12,
+ variation="last", qtr_with_extra_week=4)
+
+ assertEq(offset2, datetime(2013,1,15), datetime(2013, 3, 30))
+
class TestQuarterBegin(TestBase):
def test_repr(self):
| There are currently some issues with FY5253 working with `variation="nearest"` and `startingMonth=12`.
This PR fixes those issues and add some more tests.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5405 | 2013-11-01T03:52:05Z | 2013-11-07T21:58:18Z | 2013-11-07T21:58:18Z | 2014-07-04T19:06:17Z |
CLN: Remove unnecessary ExcelWriterMeta metaclass | diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 44b323abf45c2..ae844d1eeb5fc 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -24,6 +24,7 @@
_writer_extensions = ["xlsx", "xls", "xlsm"]
_writers = {}
+
def register_writer(klass):
"""Adds engine to the excel writer registry. You must use this method to
integrate with ``to_excel``. Also adds config options for any new
@@ -40,12 +41,14 @@ def register_writer(klass):
engine_name, validator=str)
_writer_extensions.append(ext)
+
def get_writer(engine_name):
try:
return _writers[engine_name]
except KeyError:
raise ValueError("No Excel writer '%s'" % engine_name)
+
def read_excel(io, sheetname, **kwds):
"""Read an Excel table into a pandas DataFrame
@@ -80,7 +83,7 @@ def read_excel(io, sheetname, **kwds):
engine: string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
-
+
Returns
-------
parsed : DataFrame
@@ -90,9 +93,9 @@ def read_excel(io, sheetname, **kwds):
kwds.pop('kind')
warn("kind keyword is no longer supported in read_excel and may be "
"removed in a future version", FutureWarning)
-
- engine = kwds.pop('engine', None)
-
+
+ engine = kwds.pop('engine', None)
+
return ExcelFile(io, engine=engine).parse(sheetname=sheetname, **kwds)
@@ -119,9 +122,9 @@ def __init__(self, io, **kwds):
"support, current version " + xlrd.__VERSION__)
self.io = io
-
+
engine = kwds.pop('engine', None)
-
+
if engine is not None and engine != 'xlrd':
raise ValueError("Unknown engine: %s" % engine)
@@ -133,7 +136,8 @@ def __init__(self, io, **kwds):
data = io.read()
self.book = xlrd.open_workbook(file_contents=data)
else:
- raise ValueError('Must explicitly set engine if not passing in buffer or path for io.')
+ raise ValueError('Must explicitly set engine if not passing in'
+ ' buffer or path for io.')
def parse(self, sheetname, header=0, skiprows=None, skip_footer=0,
index_col=None, parse_cols=None, parse_dates=False,
@@ -291,6 +295,7 @@ def __enter__(self):
def __exit__(self, exc_type, exc_value, traceback):
self.close()
+
def _trim_excel_header(row):
# trim header row so auto-index inference works
# xlrd uses '' , openpyxl None
@@ -298,6 +303,7 @@ def _trim_excel_header(row):
row = row[1:]
return row
+
def _conv_value(val):
# Convert numpy types to Python types for the Excel writers.
if com.is_integer(val):
@@ -312,34 +318,45 @@ def _conv_value(val):
return val
-class ExcelWriterMeta(abc.ABCMeta):
+@add_metaclass(abc.ABCMeta)
+class ExcelWriter(object):
"""
- Metaclass that dynamically chooses the ExcelWriter to use.
-
- If you directly instantiate a subclass, it skips the engine lookup.
-
- Defining an ExcelWriter implementation (see abstract methods on ExcelWriter for more...).
-
- - Mandatory (but not checked at run time):
- - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
- --> called to write additional DataFrames to disk
- - ``supported_extensions`` (tuple of supported extensions), used to check
- that engine supports the given extension.
- - ``engine`` - string that gives the engine name. Necessary to
- instantiate class directly and bypass ``ExcelWriterMeta`` engine lookup.
- - ``save(self)`` --> called to save file to disk
- - Optional:
- - ``__init__(self, path, **kwargs)`` --> always called with path as first
- argument.
+ Class for writing DataFrame objects into excel sheets, default is to use
+ xlwt for xls, openpyxl for xlsx. See DataFrame.to_excel for typical usage.
- You also need to register the class with ``register_writer()``.
+ Parameters
+ ----------
+ path : string
+ Path to xls or xlsx file.
+ engine : string (optional)
+ Engine to use for writing. If None, defaults to
+ ``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
+ argument.
"""
-
- def __call__(cls, path, **kwargs):
- engine = kwargs.pop('engine', None)
- # if it's not an ExcelWriter baseclass, dont' do anything (you've
- # probably made an explicit choice here)
- if not isinstance(getattr(cls, 'engine', None), compat.string_types):
+ # Defining an ExcelWriter implementation (see abstract methods for more...)
+
+ # - Mandatory
+ # - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
+ # --> called to write additional DataFrames to disk
+ # - ``supported_extensions`` (tuple of supported extensions), used to
+ # check that engine supports the given extension.
+ # - ``engine`` - string that gives the engine name. Necessary to
+ # instantiate class directly and bypass ``ExcelWriterMeta`` engine
+ # lookup.
+ # - ``save(self)`` --> called to save file to disk
+ # - Mostly mandatory (i.e. should at least exist)
+ # - book, cur_sheet, path
+
+ # - Optional:
+ # - ``__init__(self, path, engine=None, **kwargs)`` --> always called
+ # with path as first argument.
+
+ # You also need to register the class with ``register_writer()``.
+ # Technically, ExcelWriter implementations don't need to subclass
+ # ExcelWriter.
+ def __new__(cls, path, engine=None, **kwargs):
+ # only switch class if generic(ExcelWriter)
+ if cls == ExcelWriter:
if engine is None:
ext = os.path.splitext(path)[-1][1:]
try:
@@ -348,31 +365,14 @@ def __call__(cls, path, **kwargs):
error = ValueError("No engine for filetype: '%s'" % ext)
raise error
cls = get_writer(engine)
- writer = cls.__new__(cls, path, **kwargs)
- writer.__init__(path, **kwargs)
- return writer
-
-@add_metaclass(ExcelWriterMeta)
-class ExcelWriter(object):
- """
- Class for writing DataFrame objects into excel sheets, default is to use
- xlwt for xls, openpyxl for xlsx. See DataFrame.to_excel for typical usage.
+ return object.__new__(cls)
- Parameters
- ----------
- path : string
- Path to xls or xlsx file.
- engine : string (optional)
- Engine to use for writing. If None, defaults to ``io.excel.<extension>.writer``.
- NOTE: can only be passed as a keyword argument.
- """
# declare external properties you can count on
book = None
curr_sheet = None
path = None
-
@abc.abstractproperty
def supported_extensions(self):
"extensions that writer engine supports"
@@ -407,9 +407,6 @@ def save(self):
pass
def __init__(self, path, engine=None, **engine_kwargs):
- # note that subclasses will *never* get anything for engine
- # included here so that it's visible as part of the public signature.
-
# validate that this engine can handle the extnesion
ext = os.path.splitext(path)[-1]
self.check_extension(ext)
@@ -455,7 +452,7 @@ class _OpenpyxlWriter(ExcelWriter):
engine = 'openpyxl'
supported_extensions = ('.xlsx', '.xlsm')
- def __init__(self, path, **engine_kwargs):
+ def __init__(self, path, engine=None, **engine_kwargs):
# Use the openpyxl module as the Excel writer.
from openpyxl.workbook import Workbook
@@ -511,6 +508,7 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
startrow + cell.row + 1,
cletterend,
startrow + cell.mergestart + 1))
+
@classmethod
def _convert_to_style(cls, style_dict):
"""
@@ -539,7 +537,7 @@ class _XlwtWriter(ExcelWriter):
engine = 'xlwt'
supported_extensions = ('.xls',)
- def __init__(self, path, **engine_kwargs):
+ def __init__(self, path, engine=None, **engine_kwargs):
# Use the xlwt module as the Excel writer.
import xlwt
@@ -599,7 +597,8 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
val, style)
@classmethod
- def _style_to_xlwt(cls, item, firstlevel=True, field_sep=',', line_sep=';'):
+ def _style_to_xlwt(cls, item, firstlevel=True, field_sep=',',
+ line_sep=';'):
"""helper which recursively generate an xlwt easy style string
for example:
@@ -617,12 +616,12 @@ def _style_to_xlwt(cls, item, firstlevel=True, field_sep=',', line_sep=';'):
if hasattr(item, 'items'):
if firstlevel:
it = ["%s: %s" % (key, cls._style_to_xlwt(value, False))
- for key, value in item.items()]
+ for key, value in item.items()]
out = "%s " % (line_sep).join(it)
return out
else:
it = ["%s %s" % (key, cls._style_to_xlwt(value, False))
- for key, value in item.items()]
+ for key, value in item.items()]
out = "%s " % (field_sep).join(it)
return out
else:
@@ -659,11 +658,11 @@ class _XlsxWriter(ExcelWriter):
engine = 'xlsxwriter'
supported_extensions = ('.xlsx',)
- def __init__(self, path, **engine_kwargs):
+ def __init__(self, path, engine=None, **engine_kwargs):
# Use the xlsxwriter module as the Excel writer.
import xlsxwriter
- super(_XlsxWriter, self).__init__(path, **engine_kwargs)
+ super(_XlsxWriter, self).__init__(path, engine=engine, **engine_kwargs)
self.book = xlsxwriter.Workbook(path, **engine_kwargs)
| Only `__new__` is really necessary. And subclasses just have to deal with
being passed an 'engine' keyword argument + a tad bit of style cleanup.
Thanks to @jseabold and @josef-pkt for making me realize that this was
unnecessary in https://github.com/statsmodels/statsmodels/issues/1167
:)
@jmcnamara - this has completely trivial impact on ExcelWriter
subclasses, just removes some magic.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5403 | 2013-10-31T22:20:02Z | 2013-11-01T00:21:36Z | 2013-11-01T00:21:36Z | 2014-06-30T12:55:30Z |
Display the correct error message when calling a binary moment without appropriate parameters. | diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py
index f3ec3880ec8b5..6671f0a9c958a 100644
--- a/pandas/stats/moments.py
+++ b/pandas/stats/moments.py
@@ -195,7 +195,7 @@ def _get_corr(a, b):
def _flex_binary_moment(arg1, arg2, f):
if not (isinstance(arg1,(np.ndarray, Series, DataFrame)) and
- isinstance(arg1,(np.ndarray, Series, DataFrame))):
+ isinstance(arg2,(np.ndarray, Series, DataFrame))):
raise ValueError("arguments to moment function must be of type ndarray/DataFrame")
if isinstance(arg1, (np.ndarray,Series)) and isinstance(arg2, (np.ndarray,Series)):
| This pr fixes a typo in the type checking of binary moments. It makes the error messages below consistent:
> > > import pandas
> > > a = pandas.DataFrame([1,2,3,4,5,6])
> > > pandas.rolling_cov(list(), a, 3)
> > > ValueError: arguments to moment function must be of type ndarray/DataFrame
> > >
> > > pandas.rolling_cov(a, list(), 3)
> > > TypeError: unsupported type: <type 'list'>
| https://api.github.com/repos/pandas-dev/pandas/pulls/5399 | 2013-10-31T15:41:15Z | 2013-11-21T13:44:48Z | 2013-11-21T13:44:48Z | 2014-06-22T05:58:40Z |
BUG/TST: fix downcasting of float-like object array | diff --git a/pandas/core/common.py b/pandas/core/common.py
index d05c3dbafdee6..e89ae44dacd31 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1038,7 +1038,8 @@ def _possibly_downcast_to_dtype(result, dtype):
# try to upcast here
elif inferred_type == 'floating':
dtype = 'int64'
- trans = lambda x: x.round()
+ if issubclass(result.dtype.type, np.number):
+ trans = lambda x: x.round()
else:
dtype = 'object'
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index 3fd40062e1459..dfedfd629f736 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -135,6 +135,20 @@ def test_downcast_conv():
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
+ # conversions
+
+ expected = np.array([1,2])
+ for dtype in [np.float64,object,np.int64]:
+ arr = np.array([1.0,2.0],dtype=dtype)
+ result = com._possibly_downcast_to_dtype(arr,'infer')
+ tm.assert_almost_equal(result, expected)
+
+ expected = np.array([1.0,2.0,np.nan])
+ for dtype in [np.float64,object]:
+ arr = np.array([1.0,2.0,np.nan],dtype=dtype)
+ result = com._possibly_downcast_to_dtype(arr,'infer')
+ tm.assert_almost_equal(result, expected)
+
def test_datetimeindex_from_empty_datetime64_array():
for unit in [ 'ms', 'us', 'ns' ]:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
| related #5394
This now works
```
In [1]: arr = np.array([1.0,2.0],dtype=object)
In [3]: pandas.core.common._possibly_downcast_to_dtype(arr,'infer')
Out[3]: array([1, 2])
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5397 | 2013-10-31T12:04:13Z | 2013-10-31T12:30:10Z | 2013-10-31T12:30:10Z | 2014-07-16T08:38:18Z |
ENH: read_excel: try converting numeric to int | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 0842893800dd5..1a879866c5516 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1839,6 +1839,13 @@ one can pass an :class:`~pandas.io.excel.ExcelWriter`.
df1.to_excel(writer, sheet_name='Sheet1')
df2.to_excel(writer, sheet_name='Sheet2')
+.. note:: Wringing a little more performance out of ``read_excel``
+ Internally, Excel stores all numeric data as floats. Because this can
+ produce unexpected behavior when reading in data, pandas defaults to trying
+ to convert integers to floats if it doesn't lose information (``1.0 -->
+ 1``). You can pass ``convert_float=False`` to disable this behavior, which
+ may give a slight performance improvement.
+
.. _io.excel.writers:
Excel writer engines
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 8a0e31859c185..721ef9a1cbbf3 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -207,6 +207,8 @@ Improvements to existing features
closed])
- Fixed bug in `tools.plotting.andrews_curvres` so that lines are drawn grouped
by color as expected.
+ - ``read_excel()`` now tries to convert integral floats (like ``1.0``) to int
+ by default. (:issue:`5394`)
API Changes
~~~~~~~~~~~
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index ae844d1eeb5fc..42c212caf41ca 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -83,6 +83,10 @@ def read_excel(io, sheetname, **kwds):
engine: string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
+ convert_float : boolean, default True
+ convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
+ data will be read in as floats: Excel stores all numbers as floats
+ internally.
Returns
-------
@@ -142,7 +146,7 @@ def __init__(self, io, **kwds):
def parse(self, sheetname, header=0, skiprows=None, skip_footer=0,
index_col=None, parse_cols=None, parse_dates=False,
date_parser=None, na_values=None, thousands=None, chunksize=None,
- **kwds):
+ convert_float=True, **kwds):
"""Read an Excel table into DataFrame
Parameters
@@ -172,6 +176,10 @@ def parse(self, sheetname, header=0, skiprows=None, skip_footer=0,
NaN values are overridden, otherwise they're appended to
verbose : boolean, default False
Indicate number of NA values placed in non-numeric columns
+ convert_float : boolean, default True
+ convert integral floats to int (i.e., 1.0 --> 1). If False, all
+ numeric data will be read in as floats: Excel stores all numbers as
+ floats internally.
Returns
-------
@@ -191,7 +199,9 @@ def parse(self, sheetname, header=0, skiprows=None, skip_footer=0,
parse_dates=parse_dates,
date_parser=date_parser, na_values=na_values,
thousands=thousands, chunksize=chunksize,
- skip_footer=skip_footer, **kwds)
+ skip_footer=skip_footer,
+ convert_float=convert_float,
+ **kwds)
def _should_parse(self, i, parse_cols):
@@ -229,9 +239,11 @@ def _excel2num(x):
def _parse_excel(self, sheetname, header=0, skiprows=None, skip_footer=0,
index_col=None, has_index_names=None, parse_cols=None,
parse_dates=False, date_parser=None, na_values=None,
- thousands=None, chunksize=None, **kwds):
+ thousands=None, chunksize=None, convert_float=True,
+ **kwds):
from xlrd import (xldate_as_tuple, XL_CELL_DATE,
- XL_CELL_ERROR, XL_CELL_BOOLEAN)
+ XL_CELL_ERROR, XL_CELL_BOOLEAN,
+ XL_CELL_NUMBER)
datemode = self.book.datemode
if isinstance(sheetname, compat.string_types):
@@ -260,6 +272,13 @@ def _parse_excel(self, sheetname, header=0, skiprows=None, skip_footer=0,
value = np.nan
elif typ == XL_CELL_BOOLEAN:
value = bool(value)
+ elif convert_float and typ == XL_CELL_NUMBER:
+ # GH5394 - Excel 'numbers' are always floats
+ # it's a minimal perf hit and less suprising
+ val = int(value)
+ if val == value:
+ value = val
+
row.append(value)
data.append(row)
diff --git a/pandas/io/tests/data/test_types.xls b/pandas/io/tests/data/test_types.xls
new file mode 100644
index 0000000000000..2d387603a8307
Binary files /dev/null and b/pandas/io/tests/data/test_types.xls differ
diff --git a/pandas/io/tests/data/test_types.xlsx b/pandas/io/tests/data/test_types.xlsx
new file mode 100644
index 0000000000000..ef749e04ff3b5
Binary files /dev/null and b/pandas/io/tests/data/test_types.xlsx differ
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index 15130c552c8a8..311a0953f1c02 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -1,6 +1,7 @@
# pylint: disable=E1101
from pandas.compat import u, range, map
+from datetime import datetime
import os
import unittest
@@ -306,6 +307,56 @@ def test_reader_closes_file(self):
self.assertTrue(f.closed)
+ def test_reader_special_dtypes(self):
+ _skip_if_no_xlrd()
+
+ expected = DataFrame.from_items([
+ ("IntCol", [1, 2, -3, 4, 0]),
+ ("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]),
+ ("BoolCol", [True, False, True, True, False]),
+ ("StrCol", [1, 2, 3, 4, 5]),
+ # GH5394 - this is why convert_float isn't vectorized
+ ("Str2Col", ["a", 3, "c", "d", "e"]),
+ ("DateCol", [datetime(2013, 10, 30), datetime(2013, 10, 31),
+ datetime(1905, 1, 1), datetime(2013, 12, 14),
+ datetime(2015, 3, 14)])
+ ])
+
+ xlsx_path = os.path.join(self.dirpath, 'test_types.xlsx')
+ xls_path = os.path.join(self.dirpath, 'test_types.xls')
+
+ # should read in correctly and infer types
+ for path in (xls_path, xlsx_path):
+ actual = read_excel(path, 'Sheet1')
+ tm.assert_frame_equal(actual, expected)
+
+ # if not coercing number, then int comes in as float
+ float_expected = expected.copy()
+ float_expected["IntCol"] = float_expected["IntCol"].astype(float)
+ float_expected.loc[1, "Str2Col"] = 3.0
+ for path in (xls_path, xlsx_path):
+ actual = read_excel(path, 'Sheet1', convert_float=False)
+ tm.assert_frame_equal(actual, float_expected)
+
+ # check setting Index (assuming xls and xlsx are the same here)
+ for icol, name in enumerate(expected.columns):
+ actual = read_excel(xlsx_path, 'Sheet1', index_col=icol)
+ actual2 = read_excel(xlsx_path, 'Sheet1', index_col=name)
+ exp = expected.set_index(name)
+ tm.assert_frame_equal(actual, exp)
+ tm.assert_frame_equal(actual2, exp)
+
+ # convert_float and converters should be different but both accepted
+ expected["StrCol"] = expected["StrCol"].apply(str)
+ actual = read_excel(xlsx_path, 'Sheet1', converters={"StrCol": str})
+ tm.assert_frame_equal(actual, expected)
+
+ no_convert_float = float_expected.copy()
+ no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str)
+ actual = read_excel(xlsx_path, 'Sheet1', converters={"StrCol": str},
+ convert_float=False)
+ tm.assert_frame_equal(actual, no_convert_float)
+
class ExcelWriterBase(SharedItems):
# Base class for test cases to run with different Excel writers.
@@ -390,7 +441,7 @@ def test_roundtrip(self):
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(path, 'test1', na_rep='88')
- recons = read_excel(path, 'test1', index_col=0, na_values=[88,88.0])
+ recons = read_excel(path, 'test1', index_col=0, na_values=[88, 88.0])
tm.assert_frame_equal(self.frame, recons)
def test_mixed(self):
@@ -417,6 +468,16 @@ def test_tsframe(self):
recons = reader.parse('test1')
tm.assert_frame_equal(df, recons)
+ def test_basics_with_nan(self):
+ _skip_if_no_xlrd()
+ ext = self.ext
+ path = '__tmp_to_excel_from_excel_int_types__.' + ext
+ self.frame['A'][:5] = nan
+ self.frame.to_excel(path, 'test1')
+ self.frame.to_excel(path, 'test1', cols=['A', 'B'])
+ self.frame.to_excel(path, 'test1', header=False)
+ self.frame.to_excel(path, 'test1', index=False)
+
def test_int_types(self):
_skip_if_no_xlrd()
ext = self.ext
@@ -425,20 +486,22 @@ def test_int_types(self):
for np_type in (np.int8, np.int16, np.int32, np.int64):
with ensure_clean(path) as path:
- self.frame['A'][:5] = nan
-
- self.frame.to_excel(path, 'test1')
- self.frame.to_excel(path, 'test1', cols=['A', 'B'])
- self.frame.to_excel(path, 'test1', header=False)
- self.frame.to_excel(path, 'test1', index=False)
-
- # Test np.int values read come back as float.
+ # Test np.int values read come back as int (rather than float
+ # which is Excel's format).
frame = DataFrame(np.random.randint(-10, 10, size=(10, 2)),
dtype=np_type)
frame.to_excel(path, 'test1')
reader = ExcelFile(path)
- recons = reader.parse('test1').astype(np_type)
- tm.assert_frame_equal(frame, recons, check_dtype=False)
+ recons = reader.parse('test1')
+ int_frame = frame.astype(int)
+ tm.assert_frame_equal(int_frame, recons)
+ recons2 = read_excel(path, 'test1')
+ tm.assert_frame_equal(int_frame, recons2)
+
+ # test with convert_float=False comes back as float
+ float_frame = frame.astype(float)
+ recons = read_excel(path, 'test1', convert_float=False)
+ tm.assert_frame_equal(recons, float_frame)
def test_float_types(self):
_skip_if_no_xlrd()
@@ -447,13 +510,6 @@ def test_float_types(self):
for np_type in (np.float16, np.float32, np.float64):
with ensure_clean(path) as path:
- self.frame['A'][:5] = nan
-
- self.frame.to_excel(path, 'test1')
- self.frame.to_excel(path, 'test1', cols=['A', 'B'])
- self.frame.to_excel(path, 'test1', header=False)
- self.frame.to_excel(path, 'test1', index=False)
-
# Test np.float values read come back as float.
frame = DataFrame(np.random.random_sample(10), dtype=np_type)
frame.to_excel(path, 'test1')
@@ -468,13 +524,6 @@ def test_bool_types(self):
for np_type in (np.bool8, np.bool_):
with ensure_clean(path) as path:
- self.frame['A'][:5] = nan
-
- self.frame.to_excel(path, 'test1')
- self.frame.to_excel(path, 'test1', cols=['A', 'B'])
- self.frame.to_excel(path, 'test1', header=False)
- self.frame.to_excel(path, 'test1', index=False)
-
# Test np.bool values read come back as float.
frame = (DataFrame([1, 0, True, False], dtype=np_type))
frame.to_excel(path, 'test1')
@@ -1007,11 +1056,11 @@ def test_ExcelWriter_dispatch(self):
writer = ExcelWriter('apple.xls')
tm.assert_isinstance(writer, _XlwtWriter)
-
def test_register_writer(self):
# some awkward mocking to test out dispatch and such actually works
called_save = []
called_write_cells = []
+
class DummyClass(ExcelWriter):
called_save = False
called_write_cells = False
| Adds a convert_float=True kwarg to read_excel and parse.
All Excel numeric data is stored as floats, so have to convert it specifically.
Changing default because it's suprising to save something with what looks like
a row/column of integers and get a column of floats instead. (especially
because it can lead to annoying Float64Indexes)
Resolves recent ML issue too...
| https://api.github.com/repos/pandas-dev/pandas/pulls/5394 | 2013-10-31T00:56:03Z | 2013-11-01T02:52:15Z | 2013-11-01T02:52:15Z | 2021-06-09T03:27:30Z |
BUG: groupby with a Float like index misbehaving when the index is non-monotonic (related GH5375) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 0ef2c29af8139..13d8af52ee0dd 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -345,7 +345,7 @@ API Changes
indexing and slicing work exactly the same. Indexing on other index types
are preserved (and positional fallback for ``[],ix``), with the exception,
that floating point slicing on indexes on non ``Float64Index`` will raise a
- ``TypeError``, e.g. ``Series(range(5))[3.5:4.5]`` (:issue:`263`)
+ ``TypeError``, e.g. ``Series(range(5))[3.5:4.5]`` (:issue:`263`,:issue:`5375`)
- Make Categorical repr nicer (:issue:`4368`)
- Remove deprecated ``Factor`` (:issue:`3650`)
- Remove deprecated ``set_printoptions/reset_printoptions`` (:issue:``3046``)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index e5447e5f8f58f..4beb6ecf1a63b 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -2435,7 +2435,7 @@ def _get_sorted_data(self):
return self.data.take(self.sort_idx, axis=self.axis, convert=False)
def _chop(self, sdata, slice_obj):
- return sdata[slice_obj]
+ return sdata.iloc[slice_obj]
def apply(self, f):
raise NotImplementedError
@@ -2470,7 +2470,7 @@ def fast_apply(self, f, names):
def _chop(self, sdata, slice_obj):
if self.axis == 0:
- return sdata[slice_obj]
+ return sdata.iloc[slice_obj]
else:
return sdata._slice(slice_obj, axis=1) # ix[:, slice_obj]
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 29f64090ddb11..f71d7ff9d096b 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -202,6 +202,20 @@ def test_first_last_nth_dtypes(self):
f = s.groupby(level=0).first()
self.assert_(f.dtype == 'int64')
+ def test_grouper_index_types(self):
+ # related GH5375
+ # groupby misbehaving when using a Floatlike index
+ df = DataFrame(np.arange(10).reshape(5,2),columns=list('AB'))
+ for index in [ tm.makeFloatIndex, tm.makeStringIndex,
+ tm.makeUnicodeIndex, tm.makeIntIndex,
+ tm.makeDateIndex, tm.makePeriodIndex ]:
+
+ df.index = index(len(df))
+ df.groupby(list('abcde')).apply(lambda x: x)
+
+ df.index = list(reversed(df.index.tolist()))
+ df.groupby(list('abcde')).apply(lambda x: x)
+
def test_grouper_iter(self):
self.assertEqual(sorted(self.df.groupby('A').grouper), ['bar', 'foo'])
| related #5375
| https://api.github.com/repos/pandas-dev/pandas/pulls/5393 | 2013-10-31T00:19:00Z | 2013-10-31T00:30:26Z | 2013-10-31T00:30:26Z | 2014-07-16T08:38:13Z |
API: raise/warn SettingWithCopy when chained assignment is detected | diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index e03d10b045824..b95c515831f55 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -1330,24 +1330,34 @@ indexing operation, the result will be a copy. With single label / scalar
indexing and slicing, e.g. ``df.ix[3:6]`` or ``df.ix[:, 'A']``, a view will be
returned.
-In chained expressions, the order may determine whether a copy is returned or not:
+In chained expressions, the order may determine whether a copy is returned or not.
+If an expression will set values on a copy of a slice, then a ``SettingWithCopy``
+exception will be raised (this raise/warn behavior is new starting in 0.13.0)
-.. ipython:: python
+You can control the action of a chained assignment via the option ``mode.chained_assignment``,
+which can take the values ``['raise','warn',None]``, where showing a warning is the default.
+.. ipython:: python
dfb = DataFrame({'a' : ['one', 'one', 'two',
'three', 'two', 'one', 'six'],
- 'b' : ['x', 'y', 'y',
- 'x', 'y', 'x', 'x'],
- 'c' : randn(7)})
-
-
- # goes to copy (will be lost)
- dfb[dfb.a.str.startswith('o')]['c'] = 42
+ 'c' : np.arange(7)})
# passed via reference (will stay)
dfb['c'][dfb.a.str.startswith('o')] = 42
+This however is operating on a copy and will not work.
+
+::
+
+ >>> pd.set_option('mode.chained_assignment','warn')
+ >>> dfb[dfb.a.str.startswith('o')]['c'] = 42
+ Traceback (most recent call last)
+ ...
+ SettingWithCopyWarning:
+ A value is trying to be set on a copy of a slice from a DataFrame.
+ Try using .loc[row_index,col_indexer] = value instead
+
A chained assignment can also crop up in setting in a mixed dtype frame.
.. note::
@@ -1359,28 +1369,35 @@ This is the correct access method
.. ipython:: python
dfc = DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]})
- dfc_copy = dfc.copy()
- dfc_copy.loc[0,'A'] = 11
- dfc_copy
+ dfc.loc[0,'A'] = 11
+ dfc
This *can* work at times, but is not guaranteed, and so should be avoided
.. ipython:: python
- dfc_copy = dfc.copy()
- dfc_copy['A'][0] = 111
- dfc_copy
+ dfc = dfc.copy()
+ dfc['A'][0] = 111
+ dfc
This will **not** work at all, and so should be avoided
-.. ipython:: python
+::
+
+ >>> pd.set_option('mode.chained_assignment','raise')
+ >>> dfc.loc[0]['A'] = 1111
+ Traceback (most recent call last)
+ ...
+ SettingWithCopyException:
+ A value is trying to be set on a copy of a slice from a DataFrame.
+ Try using .loc[row_index,col_indexer] = value instead
+
+.. warning::
- dfc_copy = dfc.copy()
- dfc_copy.loc[0]['A'] = 1111
- dfc_copy
+ The chained assignment warnings / exceptions are aiming to inform the user of a possibly invalid
+ assignment. There may be false positives; situations where a chained assignment is inadvertantly
+ reported.
-When assigning values to subsets of your data, thus, make sure to either use the
-pandas access methods or explicitly handle the assignment creating a copy.
Fallback indexing
-----------------
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 926e8f1d0c5ea..2e9654b2131f1 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -396,6 +396,9 @@ API Changes
3 4.000000
dtype: float64
+ - raise/warn ``SettingWithCopyError/Warning`` exception/warning when setting of a
+ copy thru chained assignment is detected, settable via option ``mode.chained_assignment``
+
Internal Refactoring
~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt
index c736a52cd1e71..b3f831af35339 100644
--- a/doc/source/v0.13.0.txt
+++ b/doc/source/v0.13.0.txt
@@ -104,6 +104,34 @@ API changes
- ``Series`` and ``DataFrame`` now have a ``mode()`` method to calculate the
statistical mode(s) by axis/Series. (:issue:`5367`)
+- Chained assignment will now by default warn if the user is assigning to a copy. This can be changed
+ with he option ``mode.chained_assignment``, allowed options are ``raise/warn/None``. See :ref:`the docs<indexing.view_versus_copy>`.
+
+ .. ipython:: python
+
+ dfc = DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]})
+ pd.set_option('chained_assignment','warn')
+
+ The following warning / exception will show if this is attempted.
+
+ .. ipython:: python
+
+ dfc.loc[0]['A'] = 1111
+
+ ::
+
+ Traceback (most recent call last)
+ ...
+ SettingWithCopyWarning:
+ A value is trying to be set on a copy of a slice from a DataFrame.
+ Try using .loc[row_index,col_indexer] = value instead
+
+ Here is the correct method of assignment.
+
+ .. ipython:: python
+
+ dfc.loc[0,'A'] = 11
+ dfc
Prior Version Deprecations/Changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/pandas/core/common.py b/pandas/core/common.py
index e89ae44dacd31..453227aec6e23 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -26,6 +26,11 @@
class PandasError(Exception):
pass
+class SettingWithCopyError(ValueError):
+ pass
+
+class SettingWithCopyWarning(Warning):
+ pass
class AmbiguousIndexError(PandasError, KeyError):
pass
diff --git a/pandas/core/config.py b/pandas/core/config.py
index 9de596142e7e0..20ec30398fd64 100644
--- a/pandas/core/config.py
+++ b/pandas/core/config.py
@@ -512,6 +512,13 @@ def _get_root(key):
cursor = cursor[p]
return cursor, path[-1]
+def _get_option_fast(key):
+ """ internal quick access routine, no error checking """
+ path = key.split('.')
+ cursor = _global_config
+ for p in path:
+ cursor = cursor[p]
+ return cursor
def _is_deprecated(key):
""" Returns True if the given option has been deprecated """
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 13f7a3dbe7d4a..1275a5463cbe3 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -271,7 +271,6 @@ def mpl_style_cb(key):
# We don't want to start importing everything at the global context level
# or we'll hit circular deps.
-
def use_inf_as_null_cb(key):
from pandas.core.common import _use_inf_as_null
_use_inf_as_null(key)
@@ -281,6 +280,17 @@ def use_inf_as_null_cb(key):
cb=use_inf_as_null_cb)
+# user warnings
+chained_assignment = """
+: string
+ Raise an exception, warn, or no action if trying to use chained assignment, The default is warn
+"""
+
+with cf.config_prefix('mode'):
+ cf.register_option('chained_assignment', 'warn', chained_assignment,
+ validator=is_one_of_factory([None, 'warn', 'raise']))
+
+
# Set up the io.excel specific configuration.
writer_engine_doc = """
: string
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1e843e40037b1..280203fa65232 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1547,12 +1547,9 @@ def _ixs(self, i, axis=0, copy=False):
i = _maybe_convert_indices(i, len(self._get_axis(axis)))
return self.reindex(i, takeable=True)
else:
- try:
- new_values = self._data.fast_2d_xs(i, copy=copy)
- except:
- new_values = self._data.fast_2d_xs(i, copy=True)
+ new_values, copy = self._data.fast_2d_xs(i, copy=copy)
return Series(new_values, index=self.columns,
- name=self.index[i])
+ name=self.index[i])._setitem_copy(copy)
# icol
else:
@@ -1892,10 +1889,18 @@ def _set_item(self, key, value):
Series/TimeSeries will be conformed to the DataFrame's index to
ensure homogeneity.
"""
+
+ is_existing = key in self.columns
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
+ # check if we are modifying a copy
+ # try to set first as we want an invalid
+ # value exeption to occur first
+ if is_existing:
+ self._check_setitem_copy()
+
def insert(self, loc, column, value, allow_duplicates=False):
"""
Insert column into DataFrame at specified location.
@@ -2093,13 +2098,16 @@ def xs(self, key, axis=0, level=None, copy=True, drop_level=True):
new_index = self.index[loc]
if np.isscalar(loc):
- new_values = self._data.fast_2d_xs(loc, copy=copy)
- return Series(new_values, index=self.columns,
- name=self.index[loc])
+
+ new_values, copy = self._data.fast_2d_xs(loc, copy=copy)
+ result = Series(new_values, index=self.columns,
+ name=self.index[loc])._setitem_copy(copy)
+
else:
result = self[loc]
result.index = new_index
- return result
+
+ return result
_xs = xs
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index b5e526e42a547..30dccb971ae18 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -19,9 +19,11 @@
from pandas import compat, _np_version_under1p7
from pandas.compat import map, zip, lrange, string_types, isidentifier
from pandas.core.common import (isnull, notnull, is_list_like,
- _values_from_object, _maybe_promote, ABCSeries)
+ _values_from_object, _maybe_promote, ABCSeries,
+ SettingWithCopyError, SettingWithCopyWarning)
import pandas.core.nanops as nanops
from pandas.util.decorators import Appender, Substitution
+from pandas.core import config
# goal is to be able to define the docs close to function, while still being
# able to share
@@ -69,7 +71,7 @@ class NDFrame(PandasObject):
copy : boolean, default False
"""
_internal_names = [
- '_data', 'name', '_cacher', '_subtyp', '_index', '_default_kind', '_default_fill_value']
+ '_data', 'name', '_cacher', '_is_copy', '_subtyp', '_index', '_default_kind', '_default_fill_value']
_internal_names_set = set(_internal_names)
_metadata = []
@@ -85,6 +87,7 @@ def __init__(self, data, axes=None, copy=False, dtype=None, fastpath=False):
for i, ax in enumerate(axes):
data = data.reindex_axis(ax, axis=i)
+ object.__setattr__(self, '_is_copy', False)
object.__setattr__(self, '_data', data)
object.__setattr__(self, '_item_cache', {})
@@ -988,6 +991,22 @@ def _set_item(self, key, value):
self._data.set(key, value)
self._clear_item_cache()
+ def _setitem_copy(self, copy):
+ """ set the _is_copy of the iiem """
+ self._is_copy = copy
+ return self
+
+ def _check_setitem_copy(self):
+ """ validate if we are doing a settitem on a chained copy """
+ if self._is_copy:
+ value = config._get_option_fast('mode.chained_assignment')
+
+ t = "A value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_index,col_indexer] = value instead"
+ if value == 'raise':
+ raise SettingWithCopyError(t)
+ elif value == 'warn':
+ warnings.warn(t,SettingWithCopyWarning)
+
def __delitem__(self, key):
"""
Delete item
@@ -1049,7 +1068,7 @@ def take(self, indices, axis=0, convert=True):
new_data = self._data.reindex_axis(new_items, indexer=indices, axis=0)
else:
new_data = self._data.take(indices, axis=baxis)
- return self._constructor(new_data).__finalize__(self)
+ return self._constructor(new_data)._setitem_copy(True).__finalize__(self)
# TODO: Check if this was clearer in 0.12
def select(self, crit, axis=0):
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 62aa95d270924..ae22d3d406c7e 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -2567,22 +2567,20 @@ def fast_2d_xs(self, loc, copy=False):
"""
get a cross sectional for a given location in the
items ; handle dups
+
+ return the result and a flag if a copy was actually made
"""
if len(self.blocks) == 1:
result = self.blocks[0].values[:, loc]
if copy:
result = result.copy()
- return result
-
- if not copy:
- raise TypeError('cannot get view of mixed-type or '
- 'non-consolidated DataFrame')
+ return result, copy
items = self.items
# non-unique (GH4726)
if not items.is_unique:
- return self._interleave(items).ravel()
+ return self._interleave(items).ravel(), True
# unique
dtype = _interleaved_dtype(self.blocks)
@@ -2593,7 +2591,7 @@ def fast_2d_xs(self, loc, copy=False):
i = items.get_loc(item)
result[i] = blk._try_coerce_result(blk.iget((j, loc)))
- return result
+ return result, True
def consolidate(self):
"""
diff --git a/pandas/core/series.py b/pandas/core/series.py
index e62bf2f36d209..3e8202c7ec0b6 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -22,7 +22,8 @@
_values_from_object,
_possibly_cast_to_datetime, _possibly_castable,
_possibly_convert_platform,
- ABCSparseArray, _maybe_match_name, _ensure_object)
+ ABCSparseArray, _maybe_match_name, _ensure_object,
+ SettingWithCopyError)
from pandas.core.index import (Index, MultiIndex, InvalidIndexError,
_ensure_index, _handle_legacy_indexes)
@@ -575,6 +576,8 @@ def __setitem__(self, key, value):
try:
self._set_with_engine(key, value)
return
+ except (SettingWithCopyError):
+ raise
except (KeyError, ValueError):
values = self.values
if (com.is_integer(key)
@@ -623,6 +626,7 @@ def _set_with_engine(self, key, value):
values = self.values
try:
self.index._engine.set_value(values, key, value)
+ self._check_setitem_copy()
return
except KeyError:
values[self.index.get_loc(key)] = value
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index e0abe7700f28d..ffc40ffbadc39 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -11059,9 +11059,11 @@ def test_xs_view(self):
dm.xs(2)[:] = 10
self.assert_((dm.xs(2) == 5).all())
+ # prior to chained assignment (GH5390)
+ # this would raise, but now just rrens a copy (and sets _is_copy)
# TODO (?): deal with mixed-type fiasco?
- with assertRaisesRegexp(TypeError, 'cannot get view of mixed-type'):
- self.mixed_frame.xs(self.mixed_frame.index[2], copy=False)
+ # with assertRaisesRegexp(TypeError, 'cannot get view of mixed-type'):
+ # self.mixed_frame.xs(self.mixed_frame.index[2], copy=False)
# unconsolidated
dm['foo'] = 6.
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 2ad9f10d1b990..5732d2ad56a4f 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -987,6 +987,7 @@ def f():
NUM_COLS = 10
col_names = ['A'+num for num in map(str,np.arange(NUM_COLS).tolist())]
index_cols = col_names[:5]
+
df = DataFrame(np.random.randint(5, size=(NUM_ROWS,NUM_COLS)), dtype=np.int64, columns=col_names)
df = df.set_index(index_cols).sort_index()
grp = df.groupby(level=index_cols[:4])
@@ -1680,6 +1681,61 @@ def test_setitem_cache_updating(self):
self.assert_(df.ix[0,'c'] == 0.0)
self.assert_(df.ix[7,'c'] == 1.0)
+ def test_detect_chained_assignment(self):
+
+ pd.set_option('chained_assignment','raise')
+
+ # work with the chain
+ expected = DataFrame([[-5,1],[-6,3]],columns=list('AB'))
+ df = DataFrame(np.arange(4).reshape(2,2),columns=list('AB'))
+ self.assert_(not df._is_copy)
+
+ df['A'][0] = -5
+ df['A'][1] = -6
+ assert_frame_equal(df, expected)
+
+ expected = DataFrame([[-5,2],[np.nan,3.]],columns=list('AB'))
+ df = DataFrame({ 'A' : np.arange(2), 'B' : np.array(np.arange(2,4),dtype=np.float64)})
+ self.assert_(not df._is_copy)
+ df['A'][0] = -5
+ df['A'][1] = np.nan
+ assert_frame_equal(df, expected)
+ self.assert_(not df['A']._is_copy)
+
+ # using a copy (the chain), fails
+ df = DataFrame({ 'A' : np.arange(2), 'B' : np.array(np.arange(2,4),dtype=np.float64)})
+ def f():
+ df.loc[0]['A'] = -5
+ self.assertRaises(com.SettingWithCopyError, f)
+
+ # doc example
+ df = DataFrame({'a' : ['one', 'one', 'two',
+ 'three', 'two', 'one', 'six'],
+ 'c' : np.arange(7) })
+ self.assert_(not df._is_copy)
+ expected = DataFrame({'a' : ['one', 'one', 'two',
+ 'three', 'two', 'one', 'six'],
+ 'c' : [42,42,2,3,4,42,6]})
+
+ def f():
+ df[df.a.str.startswith('o')]['c'] = 42
+ self.assertRaises(com.SettingWithCopyError, f)
+ df['c'][df.a.str.startswith('o')] = 42
+ assert_frame_equal(df,expected)
+
+ expected = DataFrame({'A':[111,'bbb','ccc'],'B':[1,2,3]})
+ df = DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]})
+ df['A'][0] = 111
+ def f():
+ df.loc[0]['A'] = 111
+ self.assertRaises(com.SettingWithCopyError, f)
+ assert_frame_equal(df,expected)
+
+ # warnings
+ pd.set_option('chained_assignment','warn')
+ df = DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]})
+ df.loc[0]['A'] = 111
+
def test_floating_index_doc_example(self):
index = Index([1.5, 2, 3, 4.5, 5])
| ```
In [1]: df = DataFrame({"A": [1, 2, 3, 4, 5], "B": [3.125, 4.12, 3.1, 6.2, 7.]})
In [2]: row = df.loc[0]
```
Default is to warn
```
In [3]: row["A"] = 0
pandas/core/generic.py:1001: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_index,col_indexer] = value instead
warnings.warn(t,SettingWithCopyWarning)
In [4]: row
Out[4]:
A 0.000
B 3.125
Name: 0, dtype: float64
In [5]: df
Out[5]:
A B
0 1 3.125
1 2 4.120
2 3 3.100
3 4 6.200
4 5 7.000
```
You can turn it off
```
In [6]: pd.set_option('chained',None)
In [7]: row["A"] = 0
In [8]: row
Out[8]:
A 0.000
B 3.125
Name: 0, dtype: float64
In [9]: df
Out[9]:
A B
0 1 3.125
1 2 4.120
2 3 3.100
3 4 6.200
4 5 7.000
```
Or set to raise
```
In [10]: row["A"] = 0
SettingWithCopyError: A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_index,col_indexer] = value instead
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5390 | 2013-10-30T21:50:14Z | 2013-11-06T22:24:20Z | 2013-11-06T22:24:20Z | 2014-06-12T12:24:22Z |
ER/API: unicode indices not supported on table formats in py2 (GH5386) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 4d628fac78cf0..0ef2c29af8139 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -277,6 +277,7 @@ API Changes
- ``numexpr`` 2.2.2 fixes incompatiblity in PyTables 2.4 (:issue:`4908`)
- ``flush`` now accepts an ``fsync`` parameter, which defaults to ``False``
(:issue:`5364`)
+ - ``unicode`` indices not supported on ``table`` formats (:issue:`5386`)
- ``JSON``
- added ``date_unit`` parameter to specify resolution of timestamps.
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 5919589978903..97dc8dcdec73a 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1919,6 +1919,10 @@ def set_version(self):
def pandas_type(self):
return _ensure_decoded(getattr(self.group._v_attrs, 'pandas_type', None))
+ @property
+ def format_type(self):
+ return 'fixed'
+
def __unicode__(self):
""" return a pretty representation of myself """
self.infer_axes()
@@ -2146,7 +2150,8 @@ def write_index(self, key, index):
self.write_sparse_intindex(key, index)
else:
setattr(self.attrs, '%s_variety' % key, 'regular')
- converted = _convert_index(index, self.encoding).set_name('index')
+ converted = _convert_index(index, self.encoding,
+ self.format_type).set_name('index')
self.write_array(key, converted.values)
node = getattr(self.group, key)
node._v_attrs.kind = converted.kind
@@ -2192,7 +2197,8 @@ def write_multi_index(self, key, index):
index.names)):
# write the level
level_key = '%s_level%d' % (key, i)
- conv_level = _convert_index(lev, self.encoding).set_name(level_key)
+ conv_level = _convert_index(lev, self.encoding,
+ self.format_type).set_name(level_key)
self.write_array(level_key, conv_level.values)
node = getattr(self.group, level_key)
node._v_attrs.kind = conv_level.kind
@@ -2609,6 +2615,10 @@ def __init__(self, *args, **kwargs):
def table_type_short(self):
return self.table_type.split('_')[0]
+ @property
+ def format_type(self):
+ return 'table'
+
def __unicode__(self):
""" return a pretty representatgion of myself """
self.infer_axes()
@@ -2991,7 +3001,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None,
if i in axes:
name = obj._AXIS_NAMES[i]
index_axes_map[i] = _convert_index(
- a, self.encoding).set_name(name).set_axis(i)
+ a, self.encoding, self.format_type).set_name(name).set_axis(i)
else:
# we might be able to change the axes on the appending data if
@@ -3823,7 +3833,7 @@ def _get_info(info, name):
idx = info[name] = dict()
return idx
-def _convert_index(index, encoding=None):
+def _convert_index(index, encoding=None, format_type=None):
index_name = getattr(index, 'name', None)
if isinstance(index, DatetimeIndex):
@@ -3870,9 +3880,13 @@ def _convert_index(index, encoding=None):
converted, 'string', _tables().StringCol(itemsize), itemsize=itemsize,
index_name=index_name)
elif inferred_type == 'unicode':
- atom = _tables().ObjectAtom()
- return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
- index_name=index_name)
+ if format_type == 'fixed':
+ atom = _tables().ObjectAtom()
+ return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
+ index_name=index_name)
+ raise TypeError(
+ "[unicode] is not supported as a in index type for [{0}] formats".format(format_type))
+
elif inferred_type == 'integer':
# take a guess for now, hope the values fit
atom = _tables().Int64Col()
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index a08073bd7bd35..598f374e0fcf7 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -755,6 +755,38 @@ def test_append_series(self):
store.append('mi', s)
tm.assert_series_equal(store['mi'], s)
+ def test_store_index_types(self):
+ # GH5386
+ # test storing various index types
+
+ with ensure_clean(self.path) as store:
+
+ def check(format,index):
+ df = DataFrame(np.random.randn(10,2),columns=list('AB'))
+ df.index = index(len(df))
+
+ _maybe_remove(store, 'df')
+ store.put('df',df,format=format)
+ assert_frame_equal(df,store['df'])
+
+ for index in [ tm.makeFloatIndex, tm.makeStringIndex, tm.makeIntIndex,
+ tm.makeDateIndex, tm.makePeriodIndex ]:
+
+ check('table',index)
+ check('fixed',index)
+
+ # unicode
+ index = tm.makeUnicodeIndex
+ if compat.PY3:
+ check('table',index)
+ check('fixed',index)
+ else:
+
+ # only support for fixed types (and they have a perf warning)
+ self.assertRaises(TypeError, check, 'table', index)
+ with tm.assert_produces_warning(expected_warning=PerformanceWarning):
+ check('fixed',index)
+
def test_encoding(self):
if sys.byteorder != 'little':
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index f40a8e1a5a9d6..2e4d1f3e8df74 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -336,7 +336,8 @@ def ensure_clean(filename=None, return_filelike=False):
yield filename
finally:
try:
- os.remove(filename)
+ if os.path.exists(filename):
+ os.remove(filename)
except Exception as e:
print(e)
| closes #5386
| https://api.github.com/repos/pandas-dev/pandas/pulls/5387 | 2013-10-30T13:20:45Z | 2013-10-30T13:43:13Z | 2013-10-30T13:43:13Z | 2014-06-23T13:40:00Z |
ENH: Add mode method to Series and DataFrame | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 20bfe037f7373..c2c2bc8710af2 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -348,6 +348,7 @@ Computations / Descriptive Stats
Series.mean
Series.median
Series.min
+ Series.mode
Series.nunique
Series.pct_change
Series.prod
@@ -632,6 +633,7 @@ Computations / Descriptive Stats
DataFrame.mean
DataFrame.median
DataFrame.min
+ DataFrame.mode
DataFrame.pct_change
DataFrame.prod
DataFrame.quantile
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 78b0a54b8893f..19f293561efb7 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -378,6 +378,7 @@ optional ``level`` parameter which applies only if the object has a
``median``, Arithmetic median of values
``min``, Minimum
``max``, Maximum
+ ``mode``, Mode
``abs``, Absolute Value
``prod``, Product of values
``std``, Unbiased standard deviation
@@ -473,8 +474,8 @@ value, ``idxmin`` and ``idxmax`` return the first matching index:
.. _basics.discretization:
-Value counts (histogramming)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Value counts (histogramming) / Mode
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``value_counts`` Series method and top-level function computes a histogram
of a 1D array of values. It can also be used as a function on regular arrays:
@@ -487,6 +488,17 @@ of a 1D array of values. It can also be used as a function on regular arrays:
s.value_counts()
value_counts(data)
+Similarly, you can get the most frequently occuring value(s) (the mode) of the values in a Series or DataFrame:
+
+.. ipython:: python
+
+ data = [1, 1, 3, 3, 3, 5, 5, 7, 7, 7]
+ s = Series(data)
+ s.mode()
+ df = pd.DataFrame({"A": np.random.randint(0, 7, size=50),
+ "B": np.random.randint(-10, 15, size=50)})
+ df.mode()
+
Discretization and quantiling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -514,6 +526,7 @@ normally distributed data into equal-size quartiles like so:
value_counts(factor)
We can also pass infinite values to define the bins:
+
.. ipython:: python
arr = np.random.randn(20)
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 4b33c20424b33..5fef061b9e447 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -65,6 +65,8 @@ New features
string via the ``date_format`` keyword (:issue:`4313`)
- Added ``LastWeekOfMonth`` DateOffset (:issue:`4637`)
- Added ``FY5253``, and ``FY5253Quarter`` DateOffsets (:issue:`4511`)
+ - Added ``mode()`` method to ``Series`` and ``DataFrame`` to get the
+ statistical mode(s) of a column/series. (:issue:`5367`)
Experimental Features
~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt
index 27794d6af1a8d..9b958d59d5a74 100644
--- a/doc/source/v0.13.0.txt
+++ b/doc/source/v0.13.0.txt
@@ -81,6 +81,8 @@ API changes
``SparsePanel``, etc.), now support the entire set of arithmetic operators
and arithmetic flex methods (add, sub, mul, etc.). ``SparsePanel`` does not
support ``pow`` or ``mod`` with non-scalars. (:issue:`3765`)
+- ``Series`` and ``DataFrame`` now have a ``mode()`` method to calculate the
+ statistical mode(s) by axis/Series. (:issue:`5367`)
Prior Version Deprecations/Changes
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 5778a524a584a..6ab6f15aaab15 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -3,6 +3,7 @@
intended for public consumption
"""
+from warnings import warn
import numpy as np
import pandas.core.common as com
@@ -221,6 +222,41 @@ def value_counts(values, sort=True, ascending=False, normalize=False, bins=None)
return result
+def mode(values):
+ "Returns the mode or mode(s) of the passed Series or ndarray (sorted)"
+ # must sort because hash order isn't necessarily defined.
+ from pandas.core.series import Series
+
+ if isinstance(values, Series):
+ constructor = values._constructor
+ values = values.values
+ else:
+ values = np.asanyarray(values)
+ constructor = Series
+
+ dtype = values.dtype
+ if com.is_integer_dtype(values.dtype):
+ values = com._ensure_int64(values)
+ result = constructor(sorted(htable.mode_int64(values)), dtype=dtype)
+
+ elif issubclass(values.dtype.type, (np.datetime64,np.timedelta64)):
+ dtype = values.dtype
+ values = values.view(np.int64)
+ result = constructor(sorted(htable.mode_int64(values)), dtype=dtype)
+
+ else:
+ mask = com.isnull(values)
+ values = com._ensure_object(values)
+ res = htable.mode_object(values, mask)
+ try:
+ res = sorted(res)
+ except TypeError as e:
+ warn("Unable to sort modes: %s" % e)
+ result = constructor(res, dtype=dtype)
+
+ return result
+
+
def rank(values, axis=0, method='average', na_option='keep',
ascending=True):
"""
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 0a5306de9bbb5..1cb0c4adcf5d4 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4083,6 +4083,28 @@ def _get_agg_axis(self, axis_num):
else:
raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
+ def mode(self, axis=0, numeric_only=False):
+ """
+ Gets the mode of each element along the axis selected. Empty if nothing
+ has 2+ occurrences. Adds a row for each mode per label, fills in gaps
+ with nan.
+
+ Parameters
+ ----------
+ axis : {0, 1, 'index', 'columns'} (default 0)
+ 0/'index' : get mode of each column
+ 1/'columns' : get mode of each row
+ numeric_only : bool, default False
+ if True, only apply to numeric columns
+
+ Returns
+ -------
+ modes : DataFrame (sorted)
+ """
+ data = self if not numeric_only else self._get_numeric_data()
+ f = lambda s: s.mode()
+ return data.apply(f, axis=axis)
+
def quantile(self, q=0.5, axis=0, numeric_only=True):
"""
Return values at the given quantile over requested axis, a la
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 798183a29c48b..3fe9540ba3fe0 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1127,6 +1127,26 @@ def value_counts(self, normalize=False, sort=True, ascending=False, bins=None):
return value_counts(self.values, sort=sort, ascending=ascending,
normalize=normalize, bins=bins)
+ def mode(self):
+ """Returns the mode(s) of the dataset.
+
+ Empty if nothing occurs at least 2 times. Always returns Series even
+ if only one value.
+
+ Parameters
+ ----------
+ sort : bool, default True
+ if True, will lexicographically sort values, if False skips
+ sorting. Result ordering when ``sort=False`` is not defined.
+
+ Returns
+ -------
+ modes : Series (sorted)
+ """
+ # TODO: Add option for bins like value_counts()
+ from pandas.core.algorithms import mode
+ return mode(self)
+
def unique(self):
"""
Return array of unique values in the Series. Significantly faster than
diff --git a/pandas/hashtable.pyx b/pandas/hashtable.pyx
index 1b132ea91f515..10c43478a5352 100644
--- a/pandas/hashtable.pyx
+++ b/pandas/hashtable.pyx
@@ -890,15 +890,12 @@ cdef class Int64Factorizer:
return labels
-
-def value_count_int64(ndarray[int64_t] values):
+cdef build_count_table_int64(ndarray[int64_t] values, kh_int64_t *table):
cdef:
+ int k
Py_ssize_t i, n = len(values)
- kh_int64_t *table
int ret = 0
- list uniques = []
- table = kh_init_int64()
kh_resize_int64(table, n)
for i in range(n):
@@ -910,8 +907,17 @@ def value_count_int64(ndarray[int64_t] values):
k = kh_put_int64(table, val, &ret)
table.vals[k] = 1
- # for (k = kh_begin(h); k != kh_end(h); ++k)
- # if (kh_exist(h, k)) kh_value(h, k) = 1;
+
+cpdef value_count_int64(ndarray[int64_t] values):
+ cdef:
+ Py_ssize_t i
+ kh_int64_t *table
+ int ret = 0
+ int k
+
+ table = kh_init_int64()
+ build_count_table_int64(values, table)
+
i = 0
result_keys = np.empty(table.n_occupied, dtype=np.int64)
result_counts = np.zeros(table.n_occupied, dtype=np.int64)
@@ -924,15 +930,15 @@ def value_count_int64(ndarray[int64_t] values):
return result_keys, result_counts
-def value_count_object(ndarray[object] values,
- ndarray[uint8_t, cast=True] mask):
+
+cdef build_count_table_object(ndarray[object] values,
+ ndarray[uint8_t, cast=True] mask,
+ kh_pymap_t *table):
cdef:
+ int k
Py_ssize_t i, n = len(values)
- kh_pymap_t *table
int ret = 0
- list uniques = []
- table = kh_init_pymap()
kh_resize_pymap(table, n // 10)
for i in range(n):
@@ -947,6 +953,17 @@ def value_count_object(ndarray[object] values,
k = kh_put_pymap(table, <PyObject*> val, &ret)
table.vals[k] = 1
+
+cpdef value_count_object(ndarray[object] values,
+ ndarray[uint8_t, cast=True] mask):
+ cdef:
+ Py_ssize_t i = len(values)
+ kh_pymap_t *table
+ int k
+
+ table = kh_init_pymap()
+ build_count_table_object(values, mask, table)
+
i = 0
result_keys = np.empty(table.n_occupied, dtype=object)
result_counts = np.zeros(table.n_occupied, dtype=np.int64)
@@ -959,3 +976,64 @@ def value_count_object(ndarray[object] values,
return result_keys, result_counts
+
+def mode_object(ndarray[object] values, ndarray[uint8_t, cast=True] mask):
+ cdef:
+ int count, max_count = 2
+ int j = -1 # so you can do +=
+ int k
+ Py_ssize_t i, n = len(values)
+ kh_pymap_t *table
+ int ret = 0
+
+ table = kh_init_pymap()
+ build_count_table_object(values, mask, table)
+
+ modes = np.empty(table.n_buckets, dtype=np.object_)
+ for k in range(table.n_buckets):
+ if kh_exist_pymap(table, k):
+ count = table.vals[k]
+
+ if count == max_count:
+ j += 1
+ elif count > max_count:
+ max_count = count
+ j = 0
+ else:
+ continue
+ modes[j] = <object> table.keys[k]
+
+ kh_destroy_pymap(table)
+
+ return modes[:j+1]
+
+
+def mode_int64(ndarray[int64_t] values):
+ cdef:
+ int val, max_val = 2
+ int j = -1 # so you can do +=
+ int k
+ kh_int64_t *table
+ list uniques = []
+
+ table = kh_init_int64()
+
+ build_count_table_int64(values, table)
+
+ modes = np.empty(table.n_buckets, dtype=np.int64)
+ for k in range(table.n_buckets):
+ if kh_exist_int64(table, k):
+ val = table.vals[k]
+
+ if val == max_val:
+ j += 1
+ elif val > max_val:
+ max_val = val
+ j = 0
+ else:
+ continue
+ modes[j] = table.keys[k]
+
+ kh_destroy_int64(table)
+
+ return modes[:j+1]
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 2f7696f2520fc..e0abe7700f28d 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -10050,6 +10050,59 @@ def wrapper(x):
self.assert_(np.isnan(r0).all())
self.assert_(np.isnan(r1).all())
+ def test_mode(self):
+ df = pd.DataFrame({"A": [12, 12, 11, 12, 19, 11],
+ "B": [10, 10, 10, np.nan, 3, 4],
+ "C": [8, 8, 8, 9, 9, 9],
+ "D": range(6),
+ "E": [8, 8, 1, 1, 3, 3]})
+ assert_frame_equal(df[["A"]].mode(),
+ pd.DataFrame({"A": [12]}))
+ assert_frame_equal(df[["D"]].mode(),
+ pd.DataFrame(pd.Series([], dtype="int64"),
+ columns=["D"]))
+ assert_frame_equal(df[["E"]].mode(),
+ pd.DataFrame(pd.Series([1, 3, 8], dtype="int64"),
+ columns=["E"]))
+ assert_frame_equal(df[["A", "B"]].mode(),
+ pd.DataFrame({"A": [12], "B": [10.]}))
+ assert_frame_equal(df.mode(),
+ pd.DataFrame({"A": [12, np.nan, np.nan],
+ "B": [10, np.nan, np.nan],
+ "C": [8, 9, np.nan],
+ "D": [np.nan, np.nan, np.nan],
+ "E": [1, 3, 8]}))
+
+ # outputs in sorted order
+ df["C"] = list(reversed(df["C"]))
+ print(df["C"])
+ print(df["C"].mode())
+ a, b = (df[["A", "B", "C"]].mode(),
+ pd.DataFrame({"A": [12, np.nan],
+ "B": [10, np.nan],
+ "C": [8, 9]}))
+ print(a)
+ print(b)
+ assert_frame_equal(a, b)
+ # should work with heterogeneous types
+ df = pd.DataFrame({"A": range(6),
+ "B": pd.date_range('2011', periods=6),
+ "C": list('abcdef')})
+ exp = pd.DataFrame({"A": pd.Series([], dtype=df["A"].dtype),
+ "B": pd.Series([], dtype=df["B"].dtype),
+ "C": pd.Series([], dtype=df["C"].dtype)})
+ assert_frame_equal(df.mode(), exp)
+
+ # and also when not empty
+ df.loc[1, "A"] = 0
+ df.loc[4, "B"] = df.loc[3, "B"]
+ df.loc[5, "C"] = 'e'
+ exp = pd.DataFrame({"A": pd.Series([0], dtype=df["A"].dtype),
+ "B": pd.Series([df.loc[3, "B"]], dtype=df["B"].dtype),
+ "C": pd.Series(['e'], dtype=df["C"].dtype)})
+
+ assert_frame_equal(df.mode(), exp)
+
def test_sum_corner(self):
axis0 = self.empty.sum(0)
axis1 = self.empty.sum(1)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index e5186cb3030ff..91fa1f1a19ffc 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -19,10 +19,8 @@
from pandas.core.index import MultiIndex
from pandas.tseries.index import Timestamp, DatetimeIndex
import pandas.core.config as cf
-import pandas.core.series as smod
import pandas.lib as lib
-import pandas.core.common as com
import pandas.core.datetools as datetools
import pandas.core.nanops as nanops
@@ -1721,6 +1719,35 @@ def test_median(self):
int_ts = Series(np.ones(10, dtype=int), index=lrange(10))
self.assertAlmostEqual(np.median(int_ts), int_ts.median())
+ def test_mode(self):
+ s = Series([12, 12, 11, 10, 19, 11])
+ exp = Series([11, 12])
+ assert_series_equal(s.mode(), exp)
+
+ assert_series_equal(Series([1, 2, 3]).mode(), Series([], dtype=int))
+
+ lst = [5] * 20 + [1] * 10 + [6] * 25
+ np.random.shuffle(lst)
+ s = Series(lst)
+ assert_series_equal(s.mode(), Series([6]))
+
+ s = Series([5] * 10)
+ assert_series_equal(s.mode(), Series([5]))
+
+ s = Series(lst)
+ s[0] = np.nan
+ assert_series_equal(s.mode(), Series([6], dtype=float))
+
+ s = Series(list('adfasbasfwewefwefweeeeasdfasnbam'))
+ assert_series_equal(s.mode(), Series(['e']))
+
+ s = Series(['2011-01-03', '2013-01-02', '1900-05-03'], dtype='M8[ns]')
+ assert_series_equal(s.mode(), Series([], dtype="M8[ns]"))
+ s = Series(['2011-01-03', '2013-01-02', '1900-05-03', '2011-01-03',
+ '2013-01-02'], dtype='M8[ns]')
+ assert_series_equal(s.mode(), Series(['2011-01-03', '2013-01-02'],
+ dtype='M8[ns]'))
+
def test_prod(self):
self._check_stat_op('prod', np.prod)
| Closes #5367 - generally as fast or faster than value_counts (which makes sense because it has to construct Series), so should be relatively good performance-wise. Also, doesn't get stuck on value_counts' pathological case (huge array with # uniques close to/at the size of the array).
Not using result of hashtable.value_count() under the hood and instead iterating over klib table directly gives a _huge_ speedup. I just moved the value_count table creation method to a separate function (zero perf hit). For performance breakouts check out this gist:
https://gist.github.com/jtratner/7225878
DataFrame version delegates to Series' version at each level.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5380 | 2013-10-30T03:32:21Z | 2013-11-05T06:00:57Z | 2013-11-05T06:00:57Z | 2014-06-15T23:15:45Z |
BUG: Fixes color selection in andrews_curve | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 4d628fac78cf0..f628917238e47 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -205,6 +205,8 @@ Improvements to existing features
wrapper is updated inplace, a copy is still made internally.
(:issue:`1960`, :issue:`5247`, and related :issue:`2325` [still not
closed])
+ - Fixed bug in `tools.plotting.andrews_curvres` so that lines are drawn grouped
+ by color as expected.
API Changes
~~~~~~~~~~~
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 7de5840384974..c4255e706b19f 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -454,13 +454,14 @@ def f(x):
n = len(data)
class_col = data[class_column]
+ uniq_class = class_col.drop_duplicates()
columns = [data[col] for col in data.columns if (col != class_column)]
x = [-pi + 2.0 * pi * (t / float(samples)) for t in range(samples)]
used_legends = set([])
- colors = _get_standard_colors(num_colors=n, colormap=colormap,
+ colors = _get_standard_colors(num_colors=len(uniq_class), colormap=colormap,
color_type='random', color=kwds.get('color'))
-
+ col_dict = dict([(klass, col) for klass, col in zip(uniq_class, colors)])
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
@@ -471,9 +472,9 @@ def f(x):
if com.pprint_thing(class_col[i]) not in used_legends:
label = com.pprint_thing(class_col[i])
used_legends.add(label)
- ax.plot(x, y, color=colors[i], label=label, **kwds)
+ ax.plot(x, y, color=col_dict[class_col[i]], label=label, **kwds)
else:
- ax.plot(x, y, color=colors[i], **kwds)
+ ax.plot(x, y, color=col_dict[class_col[i]], **kwds)
ax.legend(loc='upper right')
ax.grid()
@@ -656,10 +657,10 @@ def lag_plot(series, lag=1, ax=None, **kwds):
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
-
+
# workaround because `c='b'` is hardcoded in matplotlibs scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
-
+
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
@@ -1212,20 +1213,20 @@ def __init__(self, data, x, y, **kwargs):
y = self.data.columns[y]
self.x = x
self.y = y
-
-
+
+
def _make_plot(self):
x, y, data = self.x, self.y, self.data
ax = self.axes[0]
ax.scatter(data[x].values, data[y].values, **self.kwds)
-
+
def _post_plot_logic(self):
ax = self.axes[0]
- x, y = self.x, self.y
+ x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
-
-
+
+
class LinePlot(MPLPlot):
def __init__(self, data, **kwargs):
@@ -1658,25 +1659,25 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True,
elif kind == 'kde':
klass = KdePlot
elif kind == 'scatter':
- klass = ScatterPlot
+ klass = ScatterPlot
else:
raise ValueError('Invalid chart type given %s' % kind)
if kind == 'scatter':
- plot_obj = klass(frame, x=x, y=y, kind=kind, subplots=subplots,
- rot=rot,legend=legend, ax=ax, style=style,
+ plot_obj = klass(frame, x=x, y=y, kind=kind, subplots=subplots,
+ rot=rot,legend=legend, ax=ax, style=style,
fontsize=fontsize, use_index=use_index, sharex=sharex,
- sharey=sharey, xticks=xticks, yticks=yticks,
- xlim=xlim, ylim=ylim, title=title, grid=grid,
- figsize=figsize, logx=logx, logy=logy,
- sort_columns=sort_columns, secondary_y=secondary_y,
+ sharey=sharey, xticks=xticks, yticks=yticks,
+ xlim=xlim, ylim=ylim, title=title, grid=grid,
+ figsize=figsize, logx=logx, logy=logy,
+ sort_columns=sort_columns, secondary_y=secondary_y,
**kwds)
else:
if x is not None:
if com.is_integer(x) and not frame.columns.holds_integer():
x = frame.columns[x]
frame = frame.set_index(x)
-
+
if y is not None:
if com.is_integer(y) and not frame.columns.holds_integer():
y = frame.columns[y]
@@ -1691,7 +1692,7 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True,
grid=grid, logx=logx, logy=logy,
secondary_y=secondary_y, title=title,
figsize=figsize, fontsize=fontsize, **kwds)
-
+
else:
plot_obj = klass(frame, kind=kind, subplots=subplots, rot=rot,
legend=legend, ax=ax, style=style, fontsize=fontsize,
@@ -1700,7 +1701,7 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True,
title=title, grid=grid, figsize=figsize, logx=logx,
logy=logy, sort_columns=sort_columns,
secondary_y=secondary_y, **kwds)
-
+
plot_obj.generate()
plot_obj.draw()
if subplots:
| Fixed bug introduced in b5265a5b84ae1aeaf5fff88f1f9e5872a9b404e6 which change from picking a single random color for the class to picking a random color for each line.
This returns the color selection to a per-set basis.
See http://stackoverflow.com/questions/19667209/colors-in-andrews-curves
| https://api.github.com/repos/pandas-dev/pandas/pulls/5378 | 2013-10-30T02:46:42Z | 2013-10-31T03:28:25Z | 2013-10-31T03:28:25Z | 2014-06-22T05:09:45Z |
repr for PeriodIndex does not handle <=2 elements well (GH5372) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 0ef2c29af8139..f65c613ddb96a 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -760,7 +760,8 @@ Bug Fixes
(thanks for catching this @yarikoptic!)
- Fixed html tests on win32. (:issue:`4580`)
- Make sure that ``head/tail`` are ``iloc`` based, (:issue:`5370`)
-
+ - Fixed bug for ``PeriodIndex`` string representation if there are 1 or 2
+ elements. (:issue:`5372`)
pandas 0.12.0
-------------
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index c25d16e8b28f8..974c0a52a35de 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -1082,7 +1082,11 @@ def __repr__(self):
output = com.pprint_thing(self.__class__) + '\n'
output += 'freq: %s\n' % self.freq
n = len(self)
- if n:
+ if n == 1:
+ output += '[%s]\n' % (self[0])
+ elif n == 2:
+ output += '[%s, %s]\n' % (self[0], self[-1])
+ elif n:
output += '[%s, ..., %s]\n' % (self[0], self[-1])
output += 'length: %d' % n
return output
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index 312a88bcbc5a9..de6918eb8a1d1 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -1672,7 +1672,19 @@ def test_asfreq(self):
def test_ts_repr(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/31/2010')
ts = Series(np.random.randn(len(index)), index=index)
- repr(ts)
+ repr(ts) # ??
+
+ val = period_range('2013Q1', periods=1, freq="Q")
+ expected = "<class 'pandas.tseries.period.PeriodIndex'>\nfreq: Q-DEC\n[2013Q1]\nlength: 1"
+ assert_equal(repr(val), expected)
+
+ val = period_range('2013Q1', periods=2, freq="Q")
+ expected = "<class 'pandas.tseries.period.PeriodIndex'>\nfreq: Q-DEC\n[2013Q1, 2013Q2]\nlength: 2"
+ assert_equal(repr(val), expected)
+
+ val = period_range('2013Q1', periods=3, freq="Q")
+ expected = "<class 'pandas.tseries.period.PeriodIndex'>\nfreq: Q-DEC\n[2013Q1, ..., 2013Q3]\nlength: 3"
+ assert_equal(repr(val), expected)
def test_period_index_unicode(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
| closes https://github.com/pydata/pandas/issues/5372
| https://api.github.com/repos/pandas-dev/pandas/pulls/5376 | 2013-10-29T19:33:00Z | 2013-10-31T01:24:57Z | 2013-10-31T01:24:56Z | 2014-07-16T08:37:57Z |
implement bits of numpy_helper in cython where possible | diff --git a/pandas/_libs/src/numpy_helper.h b/pandas/_libs/src/numpy_helper.h
index 6c2029fff8a1a..844be9b292be3 100644
--- a/pandas/_libs/src/numpy_helper.h
+++ b/pandas/_libs/src/numpy_helper.h
@@ -18,33 +18,6 @@ The full license is in the LICENSE file, distributed with this software.
PANDAS_INLINE npy_int64 get_nat(void) { return NPY_MIN_INT64; }
-PANDAS_INLINE int is_integer_object(PyObject* obj) {
- return (!PyBool_Check(obj)) && PyArray_IsIntegerScalar(obj);
-}
-
-PANDAS_INLINE int is_float_object(PyObject* obj) {
- return (PyFloat_Check(obj) || PyArray_IsScalar(obj, Floating));
-}
-PANDAS_INLINE int is_complex_object(PyObject* obj) {
- return (PyComplex_Check(obj) || PyArray_IsScalar(obj, ComplexFloating));
-}
-
-PANDAS_INLINE int is_bool_object(PyObject* obj) {
- return (PyBool_Check(obj) || PyArray_IsScalar(obj, Bool));
-}
-
-PANDAS_INLINE int is_string_object(PyObject* obj) {
- return (PyString_Check(obj) || PyUnicode_Check(obj));
-}
-
-PANDAS_INLINE int is_datetime64_object(PyObject* obj) {
- return PyArray_IsScalar(obj, Datetime);
-}
-
-PANDAS_INLINE int is_timedelta64_object(PyObject* obj) {
- return PyArray_IsScalar(obj, Timedelta);
-}
-
PANDAS_INLINE int assign_value_1d(PyArrayObject* ap, Py_ssize_t _i,
PyObject* v) {
npy_intp i = (npy_intp)_i;
@@ -80,17 +53,4 @@ void set_array_not_contiguous(PyArrayObject* ao) {
ao->flags &= ~(NPY_C_CONTIGUOUS | NPY_F_CONTIGUOUS);
}
-// If arr is zerodim array, return a proper array scalar (e.g. np.int64).
-// Otherwise, return arr as is.
-PANDAS_INLINE PyObject* unbox_if_zerodim(PyObject* arr) {
- if (PyArray_IsZeroDim(arr)) {
- PyObject* ret;
- ret = PyArray_ToScalar(PyArray_DATA(arr), arr);
- return ret;
- } else {
- Py_INCREF(arr);
- return arr;
- }
-}
-
#endif // PANDAS__LIBS_SRC_NUMPY_HELPER_H_
diff --git a/pandas/_libs/src/util.pxd b/pandas/_libs/src/util.pxd
index be6591a118dc5..cf23df1279f34 100644
--- a/pandas/_libs/src/util.pxd
+++ b/pandas/_libs/src/util.pxd
@@ -1,24 +1,76 @@
-from numpy cimport ndarray
+from numpy cimport ndarray, NPY_C_CONTIGUOUS, NPY_F_CONTIGUOUS
cimport numpy as cnp
+cnp.import_array()
+
cimport cpython
+from cpython cimport PyTypeObject
+
+cdef extern from "Python.h":
+ # Note: importing extern-style allows us to declare these as nogil
+ # functions, whereas `from cpython cimport` does not.
+ bint PyUnicode_Check(object obj) nogil
+ bint PyString_Check(object obj) nogil
+ bint PyBool_Check(object obj) nogil
+ bint PyFloat_Check(object obj) nogil
+ bint PyComplex_Check(object obj) nogil
+ bint PyObject_TypeCheck(object obj, PyTypeObject* type) nogil
+
+
+cdef extern from "numpy/arrayobject.h":
+ PyTypeObject PyFloatingArrType_Type
+
+cdef extern from "numpy/ndarrayobject.h":
+ PyTypeObject PyTimedeltaArrType_Type
+ PyTypeObject PyDatetimeArrType_Type
+ PyTypeObject PyComplexFloatingArrType_Type
+ PyTypeObject PyBoolArrType_Type
+
+ bint PyArray_IsIntegerScalar(obj) nogil
+ bint PyArray_Check(obj) nogil
+
+# --------------------------------------------------------------------
+# Type Checking
+
+cdef inline bint is_string_object(object obj) nogil:
+ return PyString_Check(obj) or PyUnicode_Check(obj)
+
+
+cdef inline bint is_integer_object(object obj) nogil:
+ return not PyBool_Check(obj) and PyArray_IsIntegerScalar(obj)
+
+
+cdef inline bint is_float_object(object obj) nogil:
+ return (PyFloat_Check(obj) or
+ (PyObject_TypeCheck(obj, &PyFloatingArrType_Type)))
+
+cdef inline bint is_complex_object(object obj) nogil:
+ return (PyComplex_Check(obj) or
+ PyObject_TypeCheck(obj, &PyComplexFloatingArrType_Type))
+
+
+cdef inline bint is_bool_object(object obj) nogil:
+ return (PyBool_Check(obj) or
+ PyObject_TypeCheck(obj, &PyBoolArrType_Type))
+
+
+cdef inline bint is_timedelta64_object(object obj) nogil:
+ return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type)
+
+
+cdef inline bint is_datetime64_object(object obj) nogil:
+ return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type)
+
+# --------------------------------------------------------------------
cdef extern from "numpy_helper.h":
void set_array_not_contiguous(ndarray ao)
- int is_integer_object(object)
- int is_float_object(object)
- int is_complex_object(object)
- int is_bool_object(object)
- int is_string_object(object)
- int is_datetime64_object(object)
- int is_timedelta64_object(object)
int assign_value_1d(ndarray, Py_ssize_t, object) except -1
cnp.int64_t get_nat()
object get_value_1d(ndarray, Py_ssize_t)
char *get_c_string(object) except NULL
object char_to_string(char*)
- object unbox_if_zerodim(object arr)
ctypedef fused numeric:
cnp.int8_t
@@ -112,3 +164,22 @@ cdef inline bint _checknan(object val):
cdef inline bint is_period_object(object val):
return getattr(val, '_typ', '_typ') == 'period'
+
+
+cdef inline object unbox_if_zerodim(object arr):
+ """
+ If arr is zerodim array, return a proper array scalar (e.g. np.int64).
+ Otherwise, return arr as is.
+
+ Parameters
+ ----------
+ arr : object
+
+ Returns
+ -------
+ result : object
+ """
+ if cnp.PyArray_IsZeroDim(arr):
+ return cnp.PyArray_ToScalar(cnp.PyArray_DATA(arr), arr)
+ else:
+ return arr
diff --git a/setup.py b/setup.py
index 859d50303ecb1..a140221f943ea 100755
--- a/setup.py
+++ b/setup.py
@@ -686,8 +686,7 @@ def pxd(name):
ext.sources[0] = root + suffix
ujson_ext = Extension('pandas._libs.json',
- depends=['pandas/_libs/src/ujson/lib/ultrajson.h',
- 'pandas/_libs/src/numpy_helper.h'],
+ depends=['pandas/_libs/src/ujson/lib/ultrajson.h'],
sources=(['pandas/_libs/src/ujson/python/ujson.c',
'pandas/_libs/src/ujson/python/objToJSON.c',
'pandas/_libs/src/ujson/python/JSONtoObj.c',
| Like with the transition to tslibs.np_datetime, this implements pieces of numpy_helper.h directly in cython in util.pxd. The generated C should be equivalent to existing versions, but that is worth double-checking.
One dependency is removed from setup.py that was missed in #19415, should have been deleted there. | https://api.github.com/repos/pandas-dev/pandas/pulls/19450 | 2018-01-30T00:33:23Z | 2018-01-31T11:30:22Z | 2018-01-31T11:30:22Z | 2018-02-11T21:58:35Z |
Continue de-nesting core.ops | diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index ba8a15b60ba56..6ea4a81cb52a1 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -39,8 +39,7 @@
ABCSeries,
ABCDataFrame,
ABCIndex,
- ABCPeriodIndex,
- ABCSparseSeries)
+ ABCSparseSeries, ABCSparseArray)
def _gen_eval_kwargs(name):
@@ -445,8 +444,14 @@ def names(x):
return new_methods
-def add_methods(cls, new_methods, force):
+def add_methods(cls, new_methods):
for name, method in new_methods.items():
+ # For most methods, if we find that the class already has a method
+ # of the same name, it is OK to over-write it. The exception is
+ # inplace methods (__iadd__, __isub__, ...) for SparseArray, which
+ # retain the np.ndarray versions.
+ force = not (issubclass(cls, ABCSparseArray) and
+ name.startswith('__i'))
if force or name not in cls.__dict__:
bind_method(cls, name, method)
@@ -454,8 +459,7 @@ def add_methods(cls, new_methods, force):
# ----------------------------------------------------------------------
# Arithmetic
def add_special_arithmetic_methods(cls, arith_method=None,
- comp_method=None, bool_method=None,
- force=False):
+ comp_method=None, bool_method=None):
"""
Adds the full suite of special arithmetic methods (``__add__``,
``__sub__``, etc.) to the class.
@@ -469,9 +473,6 @@ def add_special_arithmetic_methods(cls, arith_method=None,
factory for rich comparison - signature: f(op, name, str_rep)
bool_method : function (optional)
factory for boolean methods - signature: f(op, name, str_rep)
- force : bool, default False
- if False, checks whether function is defined **on ``cls.__dict__``**
- before defining if True, always defines functions on class base
"""
new_methods = _create_methods(cls, arith_method, comp_method, bool_method,
special=True)
@@ -512,12 +513,11 @@ def f(self, other):
__ior__=_wrap_inplace_method(new_methods["__or__"]),
__ixor__=_wrap_inplace_method(new_methods["__xor__"])))
- add_methods(cls, new_methods=new_methods, force=force)
+ add_methods(cls, new_methods=new_methods)
def add_flex_arithmetic_methods(cls, flex_arith_method,
- flex_comp_method=None, flex_bool_method=None,
- force=False):
+ flex_comp_method=None, flex_bool_method=None):
"""
Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)
to the class.
@@ -529,9 +529,6 @@ def add_flex_arithmetic_methods(cls, flex_arith_method,
f(op, name, str_rep)
flex_comp_method : function, optional,
factory for rich comparison - signature: f(op, name, str_rep)
- force : bool, default False
- if False, checks whether function is defined **on ``cls.__dict__``**
- before defining if True, always defines functions on class base
"""
new_methods = _create_methods(cls, flex_arith_method,
flex_comp_method, flex_bool_method,
@@ -544,7 +541,7 @@ def add_flex_arithmetic_methods(cls, flex_arith_method,
if k in new_methods:
new_methods.pop(k)
- add_methods(cls, new_methods=new_methods, force=force)
+ add_methods(cls, new_methods=new_methods)
# -----------------------------------------------------------------------------
@@ -614,14 +611,11 @@ def na_op(x, y):
result = np.empty(x.size, dtype=dtype)
mask = notna(x) & notna(y)
result[mask] = op(x[mask], com._values_from_object(y[mask]))
- elif isinstance(x, np.ndarray):
+ else:
+ assert isinstance(x, np.ndarray)
result = np.empty(len(x), dtype=x.dtype)
mask = notna(x)
result[mask] = op(x[mask], y)
- else:
- raise TypeError("{typ} cannot perform the operation "
- "{op}".format(typ=type(x).__name__,
- op=str_rep))
result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
@@ -658,6 +652,10 @@ def wrapper(left, right, name=name, na_op=na_op):
index=left.index, name=res_name,
dtype=result.dtype)
+ elif is_categorical_dtype(left):
+ raise TypeError("{typ} cannot perform the operation "
+ "{op}".format(typ=type(left).__name__, op=str_rep))
+
lvalues = left.values
rvalues = right
if isinstance(rvalues, ABCSeries):
@@ -745,8 +743,12 @@ def na_op(x, y):
elif is_categorical_dtype(y) and not is_scalar(y):
return op(y, x)
- if is_object_dtype(x.dtype):
+ elif is_object_dtype(x.dtype):
result = _comp_method_OBJECT_ARRAY(op, x, y)
+
+ elif is_datetimelike_v_numeric(x, y):
+ raise TypeError("invalid type comparison")
+
else:
# we want to compare like types
@@ -754,15 +756,6 @@ def na_op(x, y):
# we are not NotImplemented, otherwise
# we would allow datetime64 (but viewed as i8) against
# integer comparisons
- if is_datetimelike_v_numeric(x, y):
- raise TypeError("invalid type comparison")
-
- # numpy does not like comparisons vs None
- if is_scalar(y) and isna(y):
- if name == '__ne__':
- return np.ones(len(x), dtype=bool)
- else:
- return np.zeros(len(x), dtype=bool)
# we have a datetime/timedelta and may need to convert
mask = None
@@ -795,15 +788,18 @@ def wrapper(self, other, axis=None):
if axis is not None:
self._get_axis_number(axis)
- if isinstance(other, ABCSeries):
+ if isinstance(other, ABCDataFrame): # pragma: no cover
+ # Defer to DataFrame implementation; fail early
+ return NotImplemented
+
+ elif isinstance(other, ABCSeries):
name = com._maybe_match_name(self, other)
if not self._indexed_same(other):
msg = 'Can only compare identically-labeled Series objects'
raise ValueError(msg)
- return self._constructor(na_op(self.values, other.values),
- index=self.index, name=name)
- elif isinstance(other, ABCDataFrame): # pragma: no cover
- return NotImplemented
+ res_values = na_op(self.values, other.values)
+ return self._constructor(res_values, index=self.index, name=name)
+
elif isinstance(other, (np.ndarray, pd.Index)):
# do not check length of zerodim array
# as it will broadcast
@@ -811,23 +807,25 @@ def wrapper(self, other, axis=None):
len(self) != len(other)):
raise ValueError('Lengths must match to compare')
- if isinstance(other, ABCPeriodIndex):
- # temp workaround until fixing GH 13637
- # tested in test_nat_comparisons
- # (pandas.tests.series.test_operators.TestSeriesOperators)
- return self._constructor(na_op(self.values,
- other.astype(object).values),
- index=self.index)
-
- return self._constructor(na_op(self.values, np.asarray(other)),
+ res_values = na_op(self.values, np.asarray(other))
+ return self._constructor(res_values,
index=self.index).__finalize__(self)
- elif isinstance(other, pd.Categorical):
- if not is_categorical_dtype(self):
- msg = ("Cannot compare a Categorical for op {op} with Series "
- "of dtype {typ}.\nIf you want to compare values, use "
- "'series <op> np.asarray(other)'.")
- raise TypeError(msg.format(op=op, typ=self.dtype))
+ elif (isinstance(other, pd.Categorical) and
+ not is_categorical_dtype(self)):
+ raise TypeError("Cannot compare a Categorical for op {op} with "
+ "Series of dtype {typ}.\nIf you want to compare "
+ "values, use 'series <op> np.asarray(other)'."
+ .format(op=op, typ=self.dtype))
+
+ elif is_scalar(other) and isna(other):
+ # numpy does not like comparisons vs None
+ if op is operator.ne:
+ res_values = np.ones(len(self), dtype=bool)
+ else:
+ res_values = np.zeros(len(self), dtype=bool)
+ return self._constructor(res_values, index=self.index,
+ name=self.name, dtype='bool')
if is_categorical_dtype(self):
# cats are a special case as get_values() would return an ndarray,
@@ -877,11 +875,10 @@ def na_op(x, y):
y = _ensure_object(y)
result = lib.vec_binop(x, y, op)
else:
+ # let null fall thru
+ if not isna(y):
+ y = bool(y)
try:
-
- # let null fall thru
- if not isna(y):
- y = bool(y)
result = lib.scalar_binop(x, y, op)
except:
msg = ("cannot compare a dtyped [{dtype}] array "
@@ -899,26 +896,31 @@ def wrapper(self, other):
self, other = _align_method_SERIES(self, other, align_asobject=True)
- if isinstance(other, ABCSeries):
+ if isinstance(other, ABCDataFrame):
+ # Defer to DataFrame implementation; fail early
+ return NotImplemented
+
+ elif isinstance(other, ABCSeries):
name = com._maybe_match_name(self, other)
is_other_int_dtype = is_integer_dtype(other.dtype)
other = fill_int(other) if is_other_int_dtype else fill_bool(other)
filler = (fill_int if is_self_int_dtype and is_other_int_dtype
else fill_bool)
- return filler(self._constructor(na_op(self.values, other.values),
- index=self.index, name=name))
- elif isinstance(other, ABCDataFrame):
- return NotImplemented
+ res_values = na_op(self.values, other.values)
+ unfilled = self._constructor(res_values,
+ index=self.index, name=name)
+ return filler(unfilled)
else:
# scalars, list, tuple, np.array
filler = (fill_int if is_self_int_dtype and
is_integer_dtype(np.asarray(other)) else fill_bool)
- return filler(self._constructor(
- na_op(self.values, other),
- index=self.index)).__finalize__(self)
+
+ res_values = na_op(self.values, other)
+ unfilled = self._constructor(res_values, index=self.index)
+ return filler(unfilled).__finalize__(self)
return wrapper
@@ -1023,21 +1025,23 @@ def na_op(x, y):
mask = notna(xrav) & notna(yrav)
xrav = xrav[mask]
- # we may need to manually
- # broadcast a 1 element array
if yrav.shape != mask.shape:
- yrav = np.empty(mask.shape, dtype=yrav.dtype)
- yrav.fill(yrav.item())
+ # FIXME: GH#5284, GH#5035, GH#19448
+ # Without specifically raising here we get mismatched
+ # errors in Py3 (TypeError) vs Py2 (ValueError)
+ raise ValueError('Cannot broadcast operands together.')
yrav = yrav[mask]
- if np.prod(xrav.shape) and np.prod(yrav.shape):
+ if xrav.size:
with np.errstate(all='ignore'):
result[mask] = op(xrav, yrav)
- elif hasattr(x, 'size'):
+
+ elif isinstance(x, np.ndarray):
+ # mask is only meaningful for x
result = np.empty(x.size, dtype=x.dtype)
mask = notna(xrav)
xrav = xrav[mask]
- if np.prod(xrav.shape):
+ if xrav.size:
with np.errstate(all='ignore'):
result[mask] = op(xrav, y)
else:
diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py
index 1c23527cf57c4..62a467bec2683 100644
--- a/pandas/core/sparse/series.py
+++ b/pandas/core/sparse/series.py
@@ -819,4 +819,4 @@ def from_coo(cls, A, dense_index=False):
ops.add_special_arithmetic_methods(SparseSeries,
ops._arith_method_SPARSE_SERIES,
comp_method=ops._arith_method_SPARSE_SERIES,
- bool_method=None, force=True)
+ bool_method=None)
| - Move `isinstance(other, ABCDataFrame)` checks to consistently be the first thing checked in Series ops
- Remove `force` kwarg, define it in the one place it is used.
- Remove kludge for `PeriodIndex`
- Handle categorical_dtype earlier in arith_method_SERIES, decreasing complexity of the closure.
- Handle scalar na other earlier in _comp_method_SERIES, decreasing complexity of the closure.
- Remove broken broadcasting case from _arith_method_FRAME (closes #19421) | https://api.github.com/repos/pandas-dev/pandas/pulls/19448 | 2018-01-29T18:27:20Z | 2018-02-02T11:29:53Z | 2018-02-02T11:29:53Z | 2018-02-04T16:43:27Z |
catch PerformanceWarning | diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index 38e5753d1752d..8feee6e6cff68 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -19,6 +19,7 @@
from pandas.core.indexes.timedeltas import Timedelta
import pandas.core.nanops as nanops
+from pandas.errors import PerformanceWarning
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
@@ -871,8 +872,9 @@ def test_timedelta64_operations_with_DateOffset(self):
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
- result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
- pd.offsets.Hour(2)])
+ with tm.assert_produces_warning(PerformanceWarning):
+ result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
+ pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
| - [x] closes #19409 | https://api.github.com/repos/pandas-dev/pandas/pulls/19446 | 2018-01-29T17:16:36Z | 2018-01-29T23:59:33Z | 2018-01-29T23:59:33Z | 2018-02-04T16:41:18Z |
Change Future to DeprecationWarning for make_block_same_class | diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index ec884035fe0c4..f3e5e4c99a899 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -230,7 +230,7 @@ def make_block_same_class(self, values, placement=None, ndim=None,
if dtype is not None:
# issue 19431 fastparquet is passing this
warnings.warn("dtype argument is deprecated, will be removed "
- "in a future release.", FutureWarning)
+ "in a future release.", DeprecationWarning)
if placement is None:
placement = self.mgr_locs
return make_block(values, placement=placement, ndim=ndim,
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index f17306b8b52f9..e3490f465b24a 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -288,9 +288,10 @@ def test_delete(self):
def test_make_block_same_class(self):
# issue 19431
block = create_block('M8[ns, US/Eastern]', [3])
- with tm.assert_produces_warning(FutureWarning,
+ with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
- block.make_block_same_class(block.values, dtype=block.values.dtype)
+ block.make_block_same_class(block.values.values,
+ dtype=block.values.dtype)
class TestDatetimeBlock(object):
| xref https://github.com/pandas-dev/pandas/pull/19434
@jreback I didn't want to further discuss on the PR, so let's do that here :-)
By having it as a FutureWarning, we only annoy users, and the fastparquet developers are already aware of it.
BTW, we do exactly the same for pyarrow's 'misuse' of internal API, we added a deprecationwarning for them. | https://api.github.com/repos/pandas-dev/pandas/pulls/19442 | 2018-01-29T13:00:15Z | 2018-01-29T21:39:09Z | 2018-01-29T21:39:09Z | 2018-01-29T21:39:13Z |
BUG: Fix problem with SparseDataFrame not persisting to csv | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 1890636bc8e1a..ae1d05ecdb008 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -497,7 +497,6 @@ I/O
- Bug in :func:`DataFrame.to_parquet` where an exception was raised if the write destination is S3 (:issue:`19134`)
- :class:`Interval` now supported in :func:`DataFrame.to_excel` for all Excel file types (:issue:`19242`)
- :class:`Timedelta` now supported in :func:`DataFrame.to_excel` for xls file type (:issue:`19242`, :issue:`9155`)
--
Plotting
^^^^^^^^
@@ -521,7 +520,7 @@ Sparse
^^^^^^
- Bug in which creating a ``SparseDataFrame`` from a dense ``Series`` or an unsupported type raised an uncontrolled exception (:issue:`19374`)
--
+- Bug in :class:`SparseDataFrame.to_csv` causing exception (:issue:`19384`)
-
Reshaping
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index f3e5e4c99a899..d06346cc27a28 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -709,7 +709,8 @@ def to_native_types(self, slicer=None, na_rep='nan', quoting=None,
**kwargs):
""" convert to our native types format, slicing if desired """
- values = self.values
+ values = self.get_values()
+
if slicer is not None:
values = values[:, slicer]
mask = isna(values)
diff --git a/pandas/tests/sparse/frame/test_to_csv.py b/pandas/tests/sparse/frame/test_to_csv.py
new file mode 100644
index 0000000000000..b0243dfde8d3f
--- /dev/null
+++ b/pandas/tests/sparse/frame/test_to_csv.py
@@ -0,0 +1,20 @@
+import numpy as np
+import pytest
+from pandas import SparseDataFrame, read_csv
+from pandas.util import testing as tm
+
+
+class TestSparseDataFrameToCsv(object):
+ fill_values = [np.nan, 0, None, 1]
+
+ @pytest.mark.parametrize('fill_value', fill_values)
+ def test_to_csv_sparse_dataframe(self, fill_value):
+ # GH19384
+ sdf = SparseDataFrame({'a': type(self).fill_values},
+ default_fill_value=fill_value)
+
+ with tm.ensure_clean('sparse_df.csv') as path:
+ sdf.to_csv(path, index=False)
+ df = read_csv(path, skip_blank_lines=False)
+
+ tm.assert_sp_frame_equal(df.to_sparse(fill_value=fill_value), sdf)
| - [x] closes #19384
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19441 | 2018-01-29T08:03:44Z | 2018-02-01T19:26:16Z | 2018-02-01T19:26:16Z | 2018-02-01T19:26:21Z |
TST: fix test for MultiIndexPyIntEngine on 32 bit | diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 9582264a8c716..65332ae7153e2 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -1611,7 +1611,7 @@ def test_pyint_engine(self):
index = MultiIndex.from_tuples(keys)
assert index.get_loc(keys[idx]) == idx
- expected = np.arange(idx + 1, dtype='int64')
+ expected = np.arange(idx + 1, dtype=np.intp)
result = index.get_indexer([keys[i] for i in expected])
tm.assert_numpy_array_equal(result, expected)
| - [x] closes #19439
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
It was just a problem in the test. | https://api.github.com/repos/pandas-dev/pandas/pulls/19440 | 2018-01-29T07:47:08Z | 2018-01-29T14:07:03Z | 2018-01-29T14:07:03Z | 2018-01-30T04:47:31Z |
[#19431] Regression in make_block_same_class (tests failing for new fastparquet release) | diff --git a/doc/source/io.rst b/doc/source/io.rst
index ae04996b4fddf..4199f161501ec 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -4537,7 +4537,7 @@ See the documentation for `pyarrow <http://arrow.apache.org/docs/python/>`__ and
.. note::
These engines are very similar and should read/write nearly identical parquet format files.
- Currently ``pyarrow`` does not support timedelta data, and ``fastparquet`` does not support timezone aware datetimes (they are coerced to UTC).
+ Currently ``pyarrow`` does not support timedelta data, ``fastparquet>=0.1.4`` supports timezone aware datetimes.
These libraries differ by having different underlying dependencies (``fastparquet`` by using ``numba``, while ``pyarrow`` uses a c-library).
.. ipython:: python
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index c2d3d0852384c..ec884035fe0c4 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -224,12 +224,17 @@ def make_block_scalar(self, values):
"""
return ScalarBlock(values)
- def make_block_same_class(self, values, placement=None, ndim=None):
+ def make_block_same_class(self, values, placement=None, ndim=None,
+ dtype=None):
""" Wrap given values in a block of same type as self. """
+ if dtype is not None:
+ # issue 19431 fastparquet is passing this
+ warnings.warn("dtype argument is deprecated, will be removed "
+ "in a future release.", FutureWarning)
if placement is None:
placement = self.mgr_locs
return make_block(values, placement=placement, ndim=ndim,
- klass=self.__class__)
+ klass=self.__class__, dtype=dtype)
def __unicode__(self):
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 57884e9816ed3..f17306b8b52f9 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -285,6 +285,13 @@ def test_delete(self):
with pytest.raises(Exception):
newb.delete(3)
+ def test_make_block_same_class(self):
+ # issue 19431
+ block = create_block('M8[ns, US/Eastern]', [3])
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ block.make_block_same_class(block.values, dtype=block.values.dtype)
+
class TestDatetimeBlock(object):
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 8a6a22abe23fa..244b6f4244252 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -71,6 +71,15 @@ def fp():
return 'fastparquet'
+@pytest.fixture
+def fp_lt_014():
+ if not _HAVE_FASTPARQUET:
+ pytest.skip("fastparquet is not installed")
+ if LooseVersion(fastparquet.__version__) >= LooseVersion('0.1.4'):
+ pytest.skip("fastparquet is >= 0.1.4")
+ return 'fastparquet'
+
+
@pytest.fixture
def df_compat():
return pd.DataFrame({'A': [1, 2, 3], 'B': 'foo'})
@@ -449,8 +458,10 @@ def test_basic(self, fp, df_full):
df = df_full
# additional supported types for fastparquet
+ if LooseVersion(fastparquet.__version__) >= LooseVersion('0.1.4'):
+ df['datetime_tz'] = pd.date_range('20130101', periods=3,
+ tz='US/Eastern')
df['timedelta'] = pd.timedelta_range('1 day', periods=3)
-
check_round_trip(df, fp)
@pytest.mark.skip(reason="not supported")
@@ -482,14 +493,15 @@ def test_categorical(self, fp):
df = pd.DataFrame({'a': pd.Categorical(list('abc'))})
check_round_trip(df, fp)
- def test_datetime_tz(self, fp):
- # doesn't preserve tz
+ def test_datetime_tz(self, fp_lt_014):
+
+ # fastparquet<0.1.4 doesn't preserve tz
df = pd.DataFrame({'a': pd.date_range('20130101', periods=3,
tz='US/Eastern')})
-
# warns on the coercion
with catch_warnings(record=True):
- check_round_trip(df, fp, expected=df.astype('datetime64[ns]'))
+ check_round_trip(df, fp_lt_014,
+ expected=df.astype('datetime64[ns]'))
def test_filter_row_groups(self, fp):
d = {'a': list(range(0, 3))}
| - [x] closes https://github.com/pandas-dev/pandas/issues/19431
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
`dtype` seems still in use at:
https://github.com/minggli/pandas/blob/2f4fc0790a5e5c51eb80bbfadf4c80f0bb424c56/pandas/core/internals.py#L2911 | https://api.github.com/repos/pandas-dev/pandas/pulls/19434 | 2018-01-28T18:59:58Z | 2018-01-29T12:43:00Z | 2018-01-29T12:43:00Z | 2018-01-29T12:46:08Z |
Misc typos | diff --git a/asv_bench/benchmarks/replace.py b/asv_bench/benchmarks/replace.py
index 6330a2b36c516..41208125e8f32 100644
--- a/asv_bench/benchmarks/replace.py
+++ b/asv_bench/benchmarks/replace.py
@@ -44,15 +44,15 @@ class Convert(object):
goal_time = 0.5
params = (['DataFrame', 'Series'], ['Timestamp', 'Timedelta'])
- param_names = ['contructor', 'replace_data']
+ param_names = ['constructor', 'replace_data']
- def setup(self, contructor, replace_data):
+ def setup(self, constructor, replace_data):
N = 10**3
data = {'Series': pd.Series(np.random.randint(N, size=N)),
'DataFrame': pd.DataFrame({'A': np.random.randint(N, size=N),
'B': np.random.randint(N, size=N)})}
self.to_replace = {i: getattr(pd, replace_data) for i in range(N)}
- self.data = data[contructor]
+ self.data = data[constructor]
- def time_replace(self, contructor, replace_data):
+ def time_replace(self, constructor, replace_data):
self.data.replace(self.to_replace)
diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py
index 45142c53dcd01..59cf7d090a622 100644
--- a/asv_bench/benchmarks/rolling.py
+++ b/asv_bench/benchmarks/rolling.py
@@ -12,14 +12,14 @@ class Methods(object):
['int', 'float'],
['median', 'mean', 'max', 'min', 'std', 'count', 'skew', 'kurt',
'sum', 'corr', 'cov'])
- param_names = ['contructor', 'window', 'dtype', 'method']
+ param_names = ['constructor', 'window', 'dtype', 'method']
- def setup(self, contructor, window, dtype, method):
+ def setup(self, constructor, window, dtype, method):
N = 10**5
arr = np.random.random(N).astype(dtype)
- self.roll = getattr(pd, contructor)(arr).rolling(window)
+ self.roll = getattr(pd, constructor)(arr).rolling(window)
- def time_rolling(self, contructor, window, dtype, method):
+ def time_rolling(self, constructor, window, dtype, method):
getattr(self.roll, method)()
@@ -30,12 +30,12 @@ class Quantile(object):
[10, 1000],
['int', 'float'],
[0, 0.5, 1])
- param_names = ['contructor', 'window', 'dtype', 'percentile']
+ param_names = ['constructor', 'window', 'dtype', 'percentile']
- def setup(self, contructor, window, dtype, percentile):
+ def setup(self, constructor, window, dtype, percentile):
N = 10**5
arr = np.random.random(N).astype(dtype)
- self.roll = getattr(pd, contructor)(arr).rolling(window)
+ self.roll = getattr(pd, constructor)(arr).rolling(window)
- def time_quantile(self, contructor, window, dtype, percentile):
+ def time_quantile(self, constructor, window, dtype, percentile):
self.roll.quantile(percentile)
diff --git a/doc/source/api.rst b/doc/source/api.rst
index ddd09327935ce..44f87aa3e1cec 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -2500,7 +2500,7 @@ Scalar introspection
Extensions
----------
-These are primarily intented for library authors looking to extend pandas
+These are primarily intended for library authors looking to extend pandas
objects.
.. currentmodule:: pandas
diff --git a/doc/source/io.rst b/doc/source/io.rst
index ae04996b4fddf..2210cd82ee561 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2675,7 +2675,7 @@ file, and the ``sheet_name`` indicating which sheet to parse.
+++++++++++++++++++
To facilitate working with multiple sheets from the same file, the ``ExcelFile``
-class can be used to wrap the file and can be be passed into ``read_excel``
+class can be used to wrap the file and can be passed into ``read_excel``
There will be a performance benefit for reading multiple sheets as the file is
read into memory only once.
diff --git a/doc/sphinxext/numpydoc/tests/test_docscrape.py b/doc/sphinxext/numpydoc/tests/test_docscrape.py
index b682504e1618f..b412124d774bb 100755
--- a/doc/sphinxext/numpydoc/tests/test_docscrape.py
+++ b/doc/sphinxext/numpydoc/tests/test_docscrape.py
@@ -42,7 +42,7 @@
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
- shape given is (m,n,...), then the shape of `out` is is
+ shape given is (m,n,...), then the shape of `out` is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
@@ -222,7 +222,7 @@ def test_str():
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
- shape given is (m,n,...), then the shape of `out` is is
+ shape given is (m,n,...), then the shape of `out` is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
@@ -340,7 +340,7 @@ def test_sphinx_str():
**out** : ndarray
The drawn samples, arranged according to `shape`. If the
- shape given is (m,n,...), then the shape of `out` is is
+ shape given is (m,n,...), then the shape of `out` is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 1e6ea7794dfff..37693068e0974 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -897,7 +897,7 @@ class Timedelta(_Timedelta):
Represents a duration, the difference between two dates or times.
Timedelta is the pandas equivalent of python's ``datetime.timedelta``
- and is interchangable with it in most cases.
+ and is interchangeable with it in most cases.
Parameters
----------
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx
index c22e0b8e555a3..215ae9ce087ee 100644
--- a/pandas/_libs/tslibs/timezones.pyx
+++ b/pandas/_libs/tslibs/timezones.pyx
@@ -295,7 +295,7 @@ cpdef bint tz_compare(object start, object end):
timezones. For example
`<DstTzInfo 'Europe/Paris' LMT+0:09:00 STD>` and
`<DstTzInfo 'Europe/Paris' CET+1:00:00 STD>` are essentially same
- timezones but aren't evaluted such, but the string representation
+ timezones but aren't evaluated such, but the string representation
for both of these is `'Europe/Paris'`.
This exists only to add a notion of equality to pytz-style zones
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 7328cd336babf..788b236b0ec59 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4115,7 +4115,7 @@ def combine(self, other, func, fill_value=None, overwrite=True):
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
- # if we have different dtypes, possibily promote
+ # if we have different dtypes, possibly promote
new_dtype = this_dtype
if not is_dtype_equal(this_dtype, other_dtype):
new_dtype = find_common_type([this_dtype, other_dtype])
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index f43c6dc567f69..8e77c7a7fa48c 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -332,7 +332,7 @@ def freqstr(self):
@cache_readonly
def inferred_freq(self):
"""
- Trys to return a string representing a frequency guess,
+ Tryies to return a string representing a frequency guess,
generated by infer_freq. Returns None if it can't autodetect the
frequency.
"""
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py
index 99bf0d5b7ac51..91dc44e3f185e 100644
--- a/pandas/core/sparse/frame.py
+++ b/pandas/core/sparse/frame.py
@@ -120,7 +120,7 @@ def __init__(self, data=None, index=None, columns=None, default_kind=None,
if dtype is not None:
mgr = mgr.astype(dtype)
else:
- msg = ('SparseDataFrame called with unkown type "{data_type}" '
+ msg = ('SparseDataFrame called with unknown type "{data_type}" '
'for data argument')
raise TypeError(msg.format(data_type=type(data).__name__))
diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py
index 4e207f9d1838c..1c23527cf57c4 100644
--- a/pandas/core/sparse/series.py
+++ b/pandas/core/sparse/series.py
@@ -493,7 +493,7 @@ def _set_value(self, label, value, takeable=False):
values = self.to_dense()
# if the label doesn't exist, we will create a new object here
- # and possibily change the index
+ # and possibly change the index
new_values = values._set_value(label, value, takeable=takeable)
if new_values is not None:
values = new_values
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 5c31b9a5668ff..12c7feb5f2b15 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -1395,7 +1395,7 @@ def _validate(data):
elif isinstance(data, Index):
# can't use ABCIndex to exclude non-str
- # see scc/inferrence.pyx which can contain string values
+ # see src/inference.pyx which can contain string values
allowed_types = ('string', 'unicode', 'mixed', 'mixed-integer')
if data.inferred_type not in allowed_types:
message = ("Can only use .str accessor with string values "
diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py
index 0c82773b75c28..7edb5b16ce77a 100644
--- a/pandas/core/util/hashing.py
+++ b/pandas/core/util/hashing.py
@@ -210,7 +210,7 @@ def _hash_categorical(c, encoding, hash_key):
# we have uint64, as we don't directly support missing values
# we don't want to use take_nd which will coerce to float
- # instead, directly construt the result with a
+ # instead, directly construct the result with a
# max(np.uint64) as the missing value indicator
#
# TODO: GH 15362
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 2293032ebb8a1..bca0b64cb53fe 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1961,7 +1961,7 @@ def formatter(value):
def get_result_as_array(self):
"""
Returns the float values converted into strings using
- the parameters given at initalisation, as a numpy array
+ the parameters given at initialisation, as a numpy array
"""
if self.formatter is not None:
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 106823199ee93..5376473f83f22 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -3763,7 +3763,7 @@ def write(self, **kwargs):
class LegacyTable(Table):
""" an appendable table: allow append/query/delete operations to a
- (possibily) already existing appendable table this table ALLOWS
+ (possibly) already existing appendable table this table ALLOWS
append (but doesn't require them), and stores the data in a format
that can be easily searched
diff --git a/pandas/tests/categorical/test_constructors.py b/pandas/tests/categorical/test_constructors.py
index b29d75bed5c6f..6cc34770a65e0 100644
--- a/pandas/tests/categorical/test_constructors.py
+++ b/pandas/tests/categorical/test_constructors.py
@@ -382,7 +382,7 @@ def test_constructor_from_categorical_with_unknown_dtype(self):
ordered=True)
tm.assert_categorical_equal(result, expected)
- def test_contructor_from_categorical_string(self):
+ def test_constructor_from_categorical_string(self):
values = Categorical(['a', 'b', 'd'])
# use categories, ordered
result = Categorical(values, categories=['a', 'b', 'c'], ordered=True,
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 8b57e96e6fa06..b24ae22162a34 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -543,7 +543,7 @@ def test_nested_dict_frame_constructor(self):
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
- # mat: 2d matrix with shpae (3, 2) to input. empty - makes sized
+ # mat: 2d matrix with shape (3, 2) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index b277d8256e612..e0ce27de5c31f 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -2531,7 +2531,7 @@ def test_date_tz(self):
[datetime(2013, 1, 1), pd.NaT], utc=True).format()
assert formatted[0] == "2013-01-01 00:00:00+00:00"
- def test_date_explict_date_format(self):
+ def test_date_explicit_date_format(self):
formatted = pd.to_datetime([datetime(2003, 2, 1), pd.NaT]).format(
date_format="%m-%d-%Y", na_rep="UT")
assert formatted[0] == "02-01-2003"
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index f2b7c20b774b0..0e6e44e839464 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -43,7 +43,7 @@ def test_empty(self, method, unit, use_bottleneck):
result = getattr(s, method)()
assert result == unit
- # Explict
+ # Explicit
result = getattr(s, method)(min_count=0)
assert result == unit
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index 7505e6b0cec3b..38e5753d1752d 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -1163,7 +1163,7 @@ def test_timedelta_floordiv(self, scalar_td):
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64_series_with_tdi(self, names):
# GH#17250 make sure result dtype is correct
- # GH#19043 make sure names are propogated correctly
+ # GH#19043 make sure names are propagated correctly
tdi = pd.TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py
index 2b589ebd4735e..0b7948cc32d24 100644
--- a/pandas/tests/sparse/frame/test_frame.py
+++ b/pandas/tests/sparse/frame/test_frame.py
@@ -218,7 +218,7 @@ def test_constructor_from_unknown_type(self):
class Unknown:
pass
with pytest.raises(TypeError,
- message='SparseDataFrame called with unkown type '
+ message='SparseDataFrame called with unknown type '
'"Unknown" for data argument'):
SparseDataFrame(Unknown())
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 941bdcbc8b064..0009e26f8b100 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -2401,7 +2401,7 @@ class for all warnings. To check that no warning is returned,
into errors.
Valid values are:
- * "error" - turns matching warnings into exeptions
+ * "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
| Found via `codespell -q 3` | https://api.github.com/repos/pandas-dev/pandas/pulls/19430 | 2018-01-28T14:22:35Z | 2018-01-29T14:14:34Z | 2018-01-29T14:14:34Z | 2018-01-29T14:22:40Z |
DOC: Spellcheck of categorical.rst and visualization.rst | diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst
index 7364167611730..efcc04d688334 100644
--- a/doc/source/categorical.rst
+++ b/doc/source/categorical.rst
@@ -19,10 +19,11 @@ Categorical Data
This is an introduction to pandas categorical data type, including a short comparison
with R's ``factor``.
-`Categoricals` are a pandas data type, which correspond to categorical variables in
-statistics: a variable, which can take on only a limited, and usually fixed,
-number of possible values (`categories`; `levels` in R). Examples are gender, social class,
-blood types, country affiliations, observation time or ratings via Likert scales.
+`Categoricals` are a pandas data type corresponding to categorical variables in
+statistics. A categorical variable takes on a limited, and usually fixed,
+number of possible values (`categories`; `levels` in R). Examples are gender,
+social class, blood type, country affiliation, observation time or rating via
+Likert scales.
In contrast to statistical categorical variables, categorical data might have an order (e.g.
'strongly agree' vs 'agree' or 'first observation' vs. 'second observation'), but numerical
@@ -48,16 +49,16 @@ See also the :ref:`API docs on categoricals<api.categorical>`.
Object Creation
---------------
-Categorical `Series` or columns in a `DataFrame` can be created in several ways:
+Categorical ``Series`` or columns in a ``DataFrame`` can be created in several ways:
-By specifying ``dtype="category"`` when constructing a `Series`:
+By specifying ``dtype="category"`` when constructing a ``Series``:
.. ipython:: python
s = pd.Series(["a","b","c","a"], dtype="category")
s
-By converting an existing `Series` or column to a ``category`` dtype:
+By converting an existing ``Series`` or column to a ``category`` dtype:
.. ipython:: python
@@ -65,18 +66,17 @@ By converting an existing `Series` or column to a ``category`` dtype:
df["B"] = df["A"].astype('category')
df
-By using some special functions:
+By using special functions, such as :func:`~pandas.cut`, which groups data into
+discrete bins. See the :ref:`example on tiling <reshaping.tile.cut>` in the docs.
.. ipython:: python
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
- labels = [ "{0} - {1}".format(i, i + 9) for i in range(0, 100, 10) ]
+ labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False, labels=labels)
df.head(10)
-See :ref:`documentation <reshaping.tile.cut>` for :func:`~pandas.cut`.
-
By passing a :class:`pandas.Categorical` object to a `Series` or assigning it to a `DataFrame`.
.. ipython:: python
@@ -89,10 +89,11 @@ By passing a :class:`pandas.Categorical` object to a `Series` or assigning it to
df["B"] = raw_cat
df
-Anywhere above we passed a keyword ``dtype='category'``, we used the default behavior of
+In the examples above where we passed ``dtype='category'``, we used the default
+behavior:
-1. categories are inferred from the data
-2. categories are unordered.
+1. Categories are inferred from the data.
+2. Categories are unordered.
To control those behaviors, instead of passing ``'category'``, use an instance
of :class:`~pandas.api.types.CategoricalDtype`.
@@ -123,8 +124,8 @@ Categorical data has a specific ``category`` :ref:`dtype <basics.dtypes>`:
In contrast to R's `factor` function, there is currently no way to assign/change labels at
creation time. Use `categories` to change the categories after creation time.
-To get back to the original Series or `numpy` array, use ``Series.astype(original_dtype)`` or
-``np.asarray(categorical)``:
+To get back to the original ``Series`` or NumPy array, use
+``Series.astype(original_dtype)`` or ``np.asarray(categorical)``:
.. ipython:: python
@@ -135,8 +136,9 @@ To get back to the original Series or `numpy` array, use ``Series.astype(origina
s2.astype(str)
np.asarray(s2)
-If you have already `codes` and `categories`, you can use the :func:`~pandas.Categorical.from_codes`
-constructor to save the factorize step during normal constructor mode:
+If you already have `codes` and `categories`, you can use the
+:func:`~pandas.Categorical.from_codes` constructor to save the factorize step
+during normal constructor mode:
.. ipython:: python
@@ -171,7 +173,7 @@ by default.
A :class:`~pandas.api.types.CategoricalDtype` can be used in any place pandas
expects a `dtype`. For example :func:`pandas.read_csv`,
-:func:`pandas.DataFrame.astype`, or in the Series constructor.
+:func:`pandas.DataFrame.astype`, or in the ``Series`` constructor.
.. note::
@@ -185,8 +187,8 @@ Equality Semantics
~~~~~~~~~~~~~~~~~~
Two instances of :class:`~pandas.api.types.CategoricalDtype` compare equal
-whenever they have the same categories and orderedness. When comparing two
-unordered categoricals, the order of the ``categories`` is not considered
+whenever they have the same categories and order. When comparing two
+unordered categoricals, the order of the ``categories`` is not considered.
.. ipython:: python
@@ -198,7 +200,7 @@ unordered categoricals, the order of the ``categories`` is not considered
# Unequal, since the second CategoricalDtype is ordered
c1 == CategoricalDtype(['a', 'b', 'c'], ordered=True)
-All instances of ``CategoricalDtype`` compare equal to the string ``'category'``
+All instances of ``CategoricalDtype`` compare equal to the string ``'category'``.
.. ipython:: python
@@ -215,8 +217,8 @@ All instances of ``CategoricalDtype`` compare equal to the string ``'category'``
Description
-----------
-Using ``.describe()`` on categorical data will produce similar output to a `Series` or
-`DataFrame` of type ``string``.
+Using :meth:`~DataFrame.describe` on categorical data will produce similar
+output to a ``Series`` or ``DataFrame`` of type ``string``.
.. ipython:: python
@@ -230,10 +232,10 @@ Using ``.describe()`` on categorical data will produce similar output to a `Seri
Working with categories
-----------------------
-Categorical data has a `categories` and a `ordered` property, which list their possible values and
-whether the ordering matters or not. These properties are exposed as ``s.cat.categories`` and
-``s.cat.ordered``. If you don't manually specify categories and ordering, they are inferred from the
-passed in values.
+Categorical data has a `categories` and a `ordered` property, which list their
+possible values and whether the ordering matters or not. These properties are
+exposed as ``s.cat.categories`` and ``s.cat.ordered``. If you don't manually
+specify categories and ordering, they are inferred from the passed arguments.
.. ipython:: python
@@ -251,13 +253,13 @@ It's also possible to pass in the categories in a specific order:
.. note::
- New categorical data are NOT automatically ordered. You must explicitly pass ``ordered=True`` to
- indicate an ordered ``Categorical``.
+ New categorical data are **not** automatically ordered. You must explicitly
+ pass ``ordered=True`` to indicate an ordered ``Categorical``.
.. note::
- The result of ``Series.unique()`` is not always the same as ``Series.cat.categories``,
+ The result of :meth:`~Series.unique` is not always the same as ``Series.cat.categories``,
because ``Series.unique()`` has a couple of guarantees, namely that it returns categories
in the order of appearance, and it only includes values that are actually present.
@@ -275,8 +277,10 @@ It's also possible to pass in the categories in a specific order:
Renaming categories
~~~~~~~~~~~~~~~~~~~
-Renaming categories is done by assigning new values to the ``Series.cat.categories`` property or
-by using the :func:`Categorical.rename_categories` method:
+Renaming categories is done by assigning new values to the
+``Series.cat.categories`` property or by using the
+:meth:`~pandas.Categorical.rename_categories` method:
+
.. ipython:: python
@@ -296,8 +300,8 @@ by using the :func:`Categorical.rename_categories` method:
.. note::
- Be aware that assigning new categories is an inplace operations, while most other operation
- under ``Series.cat`` per default return a new Series of dtype `category`.
+ Be aware that assigning new categories is an inplace operation, while most other operations
+ under ``Series.cat`` per default return a new ``Series`` of dtype `category`.
Categories must be unique or a `ValueError` is raised:
@@ -320,7 +324,8 @@ Categories must also not be ``NaN`` or a `ValueError` is raised:
Appending new categories
~~~~~~~~~~~~~~~~~~~~~~~~
-Appending categories can be done by using the :func:`Categorical.add_categories` method:
+Appending categories can be done by using the
+:meth:`~pandas.Categorical.add_categories` method:
.. ipython:: python
@@ -331,8 +336,9 @@ Appending categories can be done by using the :func:`Categorical.add_categories`
Removing categories
~~~~~~~~~~~~~~~~~~~
-Removing categories can be done by using the :func:`Categorical.remove_categories` method. Values
-which are removed are replaced by ``np.nan``.:
+Removing categories can be done by using the
+:meth:`~pandas.Categorical.remove_categories` method. Values which are removed
+are replaced by ``np.nan``.:
.. ipython:: python
@@ -353,8 +359,10 @@ Removing unused categories can also be done:
Setting categories
~~~~~~~~~~~~~~~~~~
-If you want to do remove and add new categories in one step (which has some speed advantage),
-or simply set the categories to a predefined scale, use :func:`Categorical.set_categories`.
+If you want to do remove and add new categories in one step (which has some
+speed advantage), or simply set the categories to a predefined scale,
+use :meth:`~pandas.Categorical.set_categories`.
+
.. ipython:: python
@@ -366,7 +374,7 @@ or simply set the categories to a predefined scale, use :func:`Categorical.set_c
.. note::
Be aware that :func:`Categorical.set_categories` cannot know whether some category is omitted
intentionally or because it is misspelled or (under Python3) due to a type difference (e.g.,
- numpys S1 dtype and Python strings). This can result in surprising behaviour!
+ NumPy S1 dtype and Python strings). This can result in surprising behaviour!
Sorting and Order
-----------------
@@ -374,7 +382,7 @@ Sorting and Order
.. _categorical.sort:
If categorical data is ordered (``s.cat.ordered == True``), then the order of the categories has a
-meaning and certain operations are possible. If the categorical is unordered, ``.min()/.max()`` will raise a `TypeError`.
+meaning and certain operations are possible. If the categorical is unordered, ``.min()/.max()`` will raise a ``TypeError``.
.. ipython:: python
@@ -411,8 +419,8 @@ This is even true for strings and numeric data:
Reordering
~~~~~~~~~~
-Reordering the categories is possible via the :func:`Categorical.reorder_categories` and
-the :func:`Categorical.set_categories` methods. For :func:`Categorical.reorder_categories`, all
+Reordering the categories is possible via the :meth:`Categorical.reorder_categories` and
+the :meth:`Categorical.set_categories` methods. For :meth:`Categorical.reorder_categories`, all
old categories must be included in the new categories and no new categories are allowed. This will
necessarily make the sort order the same as the categories order.
@@ -428,16 +436,16 @@ necessarily make the sort order the same as the categories order.
.. note::
Note the difference between assigning new categories and reordering the categories: the first
- renames categories and therefore the individual values in the `Series`, but if the first
+ renames categories and therefore the individual values in the ``Series``, but if the first
position was sorted last, the renamed value will still be sorted last. Reordering means that the
way values are sorted is different afterwards, but not that individual values in the
- `Series` are changed.
+ ``Series`` are changed.
.. note::
- If the `Categorical` is not ordered, ``Series.min()`` and ``Series.max()`` will raise
+ If the ``Categorical`` is not ordered, :meth:`Series.min` and :meth:`Series.max` will raise
``TypeError``. Numeric operations like ``+``, ``-``, ``*``, ``/`` and operations based on them
- (e.g. ``Series.median()``, which would need to compute the mean between two values if the length
+ (e.g. :meth:`Series.median`, which would need to compute the mean between two values if the length
of an array is even) do not work and raise a ``TypeError``.
Multi Column Sorting
@@ -464,19 +472,19 @@ Comparisons
Comparing categorical data with other objects is possible in three cases:
- * comparing equality (``==`` and ``!=``) to a list-like object (list, Series, array,
+ * Comparing equality (``==`` and ``!=``) to a list-like object (list, Series, array,
...) of the same length as the categorical data.
- * all comparisons (``==``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``) of categorical data to
+ * All comparisons (``==``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``) of categorical data to
another categorical Series, when ``ordered==True`` and the `categories` are the same.
- * all comparisons of a categorical data to a scalar.
+ * All comparisons of a categorical data to a scalar.
All other comparisons, especially "non-equality" comparisons of two categoricals with different
-categories or a categorical with any list-like object, will raise a TypeError.
+categories or a categorical with any list-like object, will raise a ``TypeError``.
.. note::
- Any "non-equality" comparisons of categorical data with a `Series`, `np.array`, `list` or
- categorical data with different categories or ordering will raise an `TypeError` because custom
+ Any "non-equality" comparisons of categorical data with a ``Series``, ``np.array``, ``list`` or
+ categorical data with different categories or ordering will raise a ``TypeError`` because custom
categories ordering could be interpreted in two ways: one with taking into account the
ordering and one without.
@@ -546,11 +554,11 @@ When you compare two unordered categoricals with the same categories, the order
Operations
----------
-Apart from ``Series.min()``, ``Series.max()`` and ``Series.mode()``, the following operations are
-possible with categorical data:
+Apart from :meth:`Series.min`, :meth:`Series.max` and :meth:`Series.mode`, the
+following operations are possible with categorical data:
-`Series` methods like `Series.value_counts()` will use all categories, even if some categories are not
-present in the data:
+``Series`` methods like :meth:`Series.value_counts` will use all categories,
+even if some categories are not present in the data:
.. ipython:: python
@@ -588,8 +596,8 @@ that only values already in `categories` can be assigned.
Getting
~~~~~~~
-If the slicing operation returns either a `DataFrame` or a column of type `Series`,
-the ``category`` dtype is preserved.
+If the slicing operation returns either a ``DataFrame`` or a column of type
+``Series``, the ``category`` dtype is preserved.
.. ipython:: python
@@ -602,8 +610,8 @@ the ``category`` dtype is preserved.
df.loc["h":"j","cats"]
df[df["cats"] == "b"]
-An example where the category type is not preserved is if you take one single row: the
-resulting `Series` is of dtype ``object``:
+An example where the category type is not preserved is if you take one single
+row: the resulting ``Series`` is of dtype ``object``:
.. ipython:: python
@@ -620,10 +628,11 @@ of length "1".
df.at["h","cats"] # returns a string
.. note::
- This is a difference to R's `factor` function, where ``factor(c(1,2,3))[1]``
+ The is in contrast to R's `factor` function, where ``factor(c(1,2,3))[1]``
returns a single value `factor`.
-To get a single value `Series` of type ``category`` pass in a list with a single value:
+To get a single value ``Series`` of type ``category``, you pass in a list with
+a single value:
.. ipython:: python
@@ -632,8 +641,8 @@ To get a single value `Series` of type ``category`` pass in a list with a single
String and datetime accessors
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The accessors ``.dt`` and ``.str`` will work if the ``s.cat.categories`` are of an appropriate
-type:
+The accessors ``.dt`` and ``.str`` will work if the ``s.cat.categories`` are of
+an appropriate type:
.. ipython:: python
@@ -676,8 +685,8 @@ That means, that the returned values from methods and properties on the accessor
Setting
~~~~~~~
-Setting values in a categorical column (or `Series`) works as long as the value is included in the
-`categories`:
+Setting values in a categorical column (or ``Series``) works as long as the
+value is included in the `categories`:
.. ipython:: python
@@ -704,7 +713,7 @@ Setting values by assigning categorical data will also check that the `categorie
except ValueError as e:
print("ValueError: " + str(e))
-Assigning a `Categorical` to parts of a column of other types will use the values:
+Assigning a ``Categorical`` to parts of a column of other types will use the values:
.. ipython:: python
@@ -719,7 +728,7 @@ Assigning a `Categorical` to parts of a column of other types will use the value
Merging
~~~~~~~
-You can concat two `DataFrames` containing categorical data together,
+You can concat two ``DataFrames`` containing categorical data together,
but the categories of these categoricals need to be the same:
.. ipython:: python
@@ -731,7 +740,7 @@ but the categories of these categoricals need to be the same:
res
res.dtypes
-In this case the categories are not the same and so an error is raised:
+In this case the categories are not the same, and therefore an error is raised:
.. ipython:: python
@@ -754,10 +763,10 @@ Unioning
.. versionadded:: 0.19.0
-If you want to combine categoricals that do not necessarily have
-the same categories, the ``union_categoricals`` function will
-combine a list-like of categoricals. The new categories
-will be the union of the categories being combined.
+If you want to combine categoricals that do not necessarily have the same
+categories, the :func:`~pandas.api.types.union_categoricals` function will
+combine a list-like of categoricals. The new categories will be the union of
+the categories being combined.
.. ipython:: python
@@ -805,8 +814,9 @@ using the ``ignore_ordered=True`` argument.
b = pd.Categorical(["c", "b", "a"], ordered=True)
union_categoricals([a, b], ignore_order=True)
-``union_categoricals`` also works with a ``CategoricalIndex``, or ``Series`` containing
-categorical data, but note that the resulting array will always be a plain ``Categorical``
+:func:`~pandas.api.types.union_categoricals` also works with a
+``CategoricalIndex``, or ``Series`` containing categorical data, but note that
+the resulting array will always be a plain ``Categorical``:
.. ipython:: python
@@ -956,7 +966,7 @@ Differences to R's `factor`
The following differences to R's factor functions can be observed:
-* R's `levels` are named `categories`
+* R's `levels` are named `categories`.
* R's `levels` are always of type string, while `categories` in pandas can be of any dtype.
* It's not possible to specify labels at creation time. Use ``s.cat.rename_categories(new_labels)``
afterwards.
@@ -1009,10 +1019,10 @@ an ``object`` dtype is a constant times the length of the data.
`Categorical` is not a `numpy` array
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Currently, categorical data and the underlying `Categorical` is implemented as a python
-object and not as a low-level `numpy` array dtype. This leads to some problems.
+Currently, categorical data and the underlying ``Categorical`` is implemented as a Python
+object and not as a low-level NumPy array dtype. This leads to some problems.
-`numpy` itself doesn't know about the new `dtype`:
+NumPy itself doesn't know about the new `dtype`:
.. ipython:: python
@@ -1041,7 +1051,7 @@ To check if a Series contains Categorical data, use ``hasattr(s, 'cat')``:
hasattr(pd.Series(['a'], dtype='category'), 'cat')
hasattr(pd.Series(['a']), 'cat')
-Using `numpy` functions on a `Series` of type ``category`` should not work as `Categoricals`
+Using NumPy functions on a ``Series`` of type ``category`` should not work as `Categoricals`
are not numeric data (even in the case that ``.categories`` is numeric).
.. ipython:: python
@@ -1080,7 +1090,7 @@ and allows efficient indexing and storage of an index with a large number of dup
See the :ref:`advanced indexing docs <indexing.categoricalindex>` for a more detailed
explanation.
-Setting the index will create a ``CategoricalIndex``
+Setting the index will create a ``CategoricalIndex``:
.. ipython:: python
@@ -1095,8 +1105,9 @@ Setting the index will create a ``CategoricalIndex``
Side Effects
~~~~~~~~~~~~
-Constructing a `Series` from a `Categorical` will not copy the input `Categorical`. This
-means that changes to the `Series` will in most cases change the original `Categorical`:
+Constructing a ``Series`` from a ``Categorical`` will not copy the input
+``Categorical``. This means that changes to the ``Series`` will in most cases
+change the original ``Categorical``:
.. ipython:: python
@@ -1109,7 +1120,7 @@ means that changes to the `Series` will in most cases change the original `Categ
df["cat"].cat.categories = [1,2,3,4,5]
cat
-Use ``copy=True`` to prevent such a behaviour or simply don't reuse `Categoricals`:
+Use ``copy=True`` to prevent such a behaviour or simply don't reuse ``Categoricals``:
.. ipython:: python
@@ -1120,6 +1131,6 @@ Use ``copy=True`` to prevent such a behaviour or simply don't reuse `Categorical
cat
.. note::
- This also happens in some cases when you supply a `numpy` array instead of a `Categorical`:
- using an int array (e.g. ``np.array([1,2,3,4])``) will exhibit the same behaviour, while using
+ This also happens in some cases when you supply a NumPy array instead of a ``Categorical``:
+ using an int array (e.g. ``np.array([1,2,3,4])``) will exhibit the same behavior, while using
a string array (e.g. ``np.array(["a","b","c","a"])``) will not.
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index cbd17493beb7e..ee93f06fbc958 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -37,7 +37,8 @@ libraries that go beyond the basics documented here.
Basic Plotting: ``plot``
------------------------
-See the :ref:`cookbook<cookbook.plotting>` for some advanced strategies
+We will demonstrate the basics, see the :ref:`cookbook<cookbook.plotting>` for
+some advanced strategies.
The ``plot`` method on Series and DataFrame is just a simple wrapper around
:meth:`plt.plot() <matplotlib.axes.Axes.plot>`:
@@ -94,7 +95,8 @@ You can plot one column versus another using the `x` and `y` keywords in
.. note::
- For more formatting and styling options, see :ref:`below <visualization.formatting>`.
+ For more formatting and styling options, see
+ :ref:`formatting <visualization.formatting>` below.
.. ipython:: python
:suppress:
@@ -107,14 +109,13 @@ Other Plots
-----------
Plotting methods allow for a handful of plot styles other than the
-default Line plot. These methods can be provided as the ``kind``
-keyword argument to :meth:`~DataFrame.plot`.
-These include:
+default line plot. These methods can be provided as the ``kind``
+keyword argument to :meth:`~DataFrame.plot`, and include:
* :ref:`'bar' <visualization.barplot>` or :ref:`'barh' <visualization.barplot>` for bar plots
* :ref:`'hist' <visualization.hist>` for histogram
* :ref:`'box' <visualization.box>` for boxplot
-* :ref:`'kde' <visualization.kde>` or ``'density'`` for density plots
+* :ref:`'kde' <visualization.kde>` or :ref:`'density' <visualization.kde>` for density plots
* :ref:`'area' <visualization.area_plot>` for area plots
* :ref:`'scatter' <visualization.scatter>` for scatter plots
* :ref:`'hexbin' <visualization.hexbin>` for hexagonal bin plots
@@ -220,7 +221,7 @@ To get horizontal bar plots, use the ``barh`` method:
Histograms
~~~~~~~~~~
-Histogram can be drawn by using the :meth:`DataFrame.plot.hist` and :meth:`Series.plot.hist` methods.
+Histograms can be drawn by using the :meth:`DataFrame.plot.hist` and :meth:`Series.plot.hist` methods.
.. ipython:: python
@@ -238,7 +239,8 @@ Histogram can be drawn by using the :meth:`DataFrame.plot.hist` and :meth:`Serie
plt.close('all')
-Histogram can be stacked by ``stacked=True``. Bin size can be changed by ``bins`` keyword.
+A histogram can be stacked using ``stacked=True``. Bin size can be changed
+using the ``bins`` keyword.
.. ipython:: python
@@ -252,7 +254,9 @@ Histogram can be stacked by ``stacked=True``. Bin size can be changed by ``bins`
plt.close('all')
-You can pass other keywords supported by matplotlib ``hist``. For example, horizontal and cumulative histogram can be drawn by ``orientation='horizontal'`` and ``cumulative=True``.
+You can pass other keywords supported by matplotlib ``hist``. For example,
+horizontal and cumulative histograms can be drawn by
+``orientation='horizontal'`` and ``cumulative=True``.
.. ipython:: python
@@ -463,7 +467,7 @@ keyword, will affect the output type as well:
``'both'`` Yes Series of namedtuples
================ ======= ==========================
-``Groupby.boxplot`` always returns a Series of ``return_type``.
+``Groupby.boxplot`` always returns a ``Series`` of ``return_type``.
.. ipython:: python
:okwarning:
@@ -481,7 +485,9 @@ keyword, will affect the output type as well:
plt.close('all')
-Compare to:
+The subplots above are split by the numeric columns first, then the value of
+the ``g`` column. Below the subplots are first split by the value of ``g``,
+then by the numeric columns.
.. ipython:: python
:okwarning:
@@ -536,8 +542,8 @@ Scatter Plot
~~~~~~~~~~~~
Scatter plot can be drawn by using the :meth:`DataFrame.plot.scatter` method.
-Scatter plot requires numeric columns for x and y axis.
-These can be specified by ``x`` and ``y`` keywords each.
+Scatter plot requires numeric columns for the x and y axes.
+These can be specified by the ``x`` and ``y`` keywords.
.. ipython:: python
:suppress:
@@ -581,8 +587,9 @@ each point:
plt.close('all')
-You can pass other keywords supported by matplotlib ``scatter``.
-Below example shows a bubble chart using a dataframe column values as bubble size.
+You can pass other keywords supported by matplotlib
+:meth:`scatter <matplotlib.axes.Axes.scatter>`. The example below shows a
+bubble chart using a column of the ``DataFrame`` as the bubble size.
.. ipython:: python
@@ -631,7 +638,7 @@ You can specify alternative aggregations by passing values to the ``C`` and
and ``reduce_C_function`` is a function of one argument that reduces all the
values in a bin to a single number (e.g. ``mean``, ``max``, ``sum``, ``std``). In this
example the positions are given by columns ``a`` and ``b``, while the value is
-given by column ``z``. The bins are aggregated with numpy's ``max`` function.
+given by column ``z``. The bins are aggregated with NumPy's ``max`` function.
.. ipython:: python
:suppress:
@@ -685,14 +692,16 @@ A ``ValueError`` will be raised if there are any negative values in your data.
plt.close('all')
-For pie plots it's best to use square figures, one's with an equal aspect ratio. You can create the
-figure with equal width and height, or force the aspect ratio to be equal after plotting by
-calling ``ax.set_aspect('equal')`` on the returned ``axes`` object.
+For pie plots it's best to use square figures, i.e. a figure aspect ratio 1.
+You can create the figure with equal width and height, or force the aspect ratio
+to be equal after plotting by calling ``ax.set_aspect('equal')`` on the returned
+``axes`` object.
-Note that pie plot with :class:`DataFrame` requires that you either specify a target column by the ``y``
-argument or ``subplots=True``. When ``y`` is specified, pie plot of selected column
-will be drawn. If ``subplots=True`` is specified, pie plots for each column are drawn as subplots.
-A legend will be drawn in each pie plots by default; specify ``legend=False`` to hide it.
+Note that pie plot with :class:`DataFrame` requires that you either specify a
+target column by the ``y`` argument or ``subplots=True``. When ``y`` is
+specified, pie plot of selected column will be drawn. If ``subplots=True`` is
+specified, pie plots for each column are drawn as subplots. A legend will be
+drawn in each pie plots by default; specify ``legend=False`` to hide it.
.. ipython:: python
:suppress:
@@ -762,7 +771,7 @@ See the `matplotlib pie documentation <http://matplotlib.org/api/pyplot_api.html
Plotting with Missing Data
--------------------------
-Pandas tries to be pragmatic about plotting DataFrames or Series
+Pandas tries to be pragmatic about plotting ``DataFrames`` or ``Series``
that contain missing data. Missing values are dropped, left out, or filled
depending on the plot type.
@@ -861,7 +870,8 @@ Andrews Curves
Andrews curves allow one to plot multivariate data as a large number
of curves that are created using the attributes of samples as coefficients
-for Fourier series. By coloring these curves differently for each class
+for Fourier series, see the `Wikipedia entry<https://en.wikipedia.org/wiki/Andrews_plot>`_
+for more information. By coloring these curves differently for each class
it is possible to visualize data clustering. Curves belonging to samples
of the same class will usually be closer together and form larger structures.
@@ -883,8 +893,10 @@ of the same class will usually be closer together and form larger structures.
Parallel Coordinates
~~~~~~~~~~~~~~~~~~~~
-Parallel coordinates is a plotting technique for plotting multivariate data.
-It allows one to see clusters in data and to estimate other statistics visually.
+Parallel coordinates is a plotting technique for plotting multivariate data,
+see the `Wikipedia entry<https://en.wikipedia.org/wiki/Parallel_coordinates>`_
+for an introduction.
+Parallel coordinates allows one to see clusters in data and to estimate other statistics visually.
Using parallel coordinates points are represented as connected line segments.
Each vertical line represents one attribute. One set of connected line segments
represents one data point. Points that tend to cluster will appear closer together.
@@ -912,7 +924,9 @@ Lag Plot
Lag plots are used to check if a data set or time series is random. Random
data should not exhibit any structure in the lag plot. Non-random structure
-implies that the underlying data are not random.
+implies that the underlying data are not random. The ``lag`` argument may
+be passed, and when ``lag=1`` the plot is essentially ``data[:-1]`` vs.
+``data[1:]``.
.. ipython:: python
:suppress:
@@ -947,7 +961,9 @@ If time series is random, such autocorrelations should be near zero for any and
all time-lag separations. If time series is non-random then one or more of the
autocorrelations will be significantly non-zero. The horizontal lines displayed
in the plot correspond to 95% and 99% confidence bands. The dashed line is 99%
-confidence band.
+confidence band. See the
+`Wikipedia entry<https://en.wikipedia.org/wiki/Correlogram>`_ for more about
+autocorrelation plots.
.. ipython:: python
:suppress:
@@ -1016,6 +1032,8 @@ unit interval). The point in the plane, where our sample settles to (where the
forces acting on our sample are at an equilibrium) is where a dot representing
our sample will be drawn. Depending on which class that sample belongs it will
be colored differently.
+See the R package `Radviz<https://cran.r-project.org/web/packages/Radviz/>`_
+for more information.
**Note**: The "Iris" dataset is available `here <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/iris.csv>`__.
@@ -1046,7 +1064,7 @@ Setting the plot style
From version 1.5 and up, matplotlib offers a range of preconfigured plotting styles. Setting the
style can be used to easily give plots the general look that you want.
Setting the style is as easy as calling ``matplotlib.style.use(my_plot_style)`` before
-creating your plot. For example you could do ``matplotlib.style.use('ggplot')`` for ggplot-style
+creating your plot. For example you could write ``matplotlib.style.use('ggplot')`` for ggplot-style
plots.
You can see the various available style names at ``matplotlib.style.available`` and it's very
@@ -1147,7 +1165,7 @@ To plot data on a secondary y-axis, use the ``secondary_y`` keyword:
plt.close('all')
-To plot some columns in a DataFrame, give the column names to the ``secondary_y``
+To plot some columns in a ``DataFrame``, give the column names to the ``secondary_y``
keyword:
.. ipython:: python
@@ -1248,7 +1266,7 @@ See the :meth:`autofmt_xdate <matplotlib.figure.autofmt_xdate>` method and the
Subplots
~~~~~~~~
-Each Series in a DataFrame can be plotted on a different axis
+Each ``Series`` in a ``DataFrame`` can be plotted on a different axis
with the ``subplots`` keyword:
.. ipython:: python
@@ -1264,9 +1282,9 @@ with the ``subplots`` keyword:
Using Layout and Targeting Multiple Axes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The layout of subplots can be specified by ``layout`` keyword. It can accept
+The layout of subplots can be specified by the ``layout`` keyword. It can accept
``(rows, columns)``. The ``layout`` keyword can be used in
-``hist`` and ``boxplot`` also. If input is invalid, ``ValueError`` will be raised.
+``hist`` and ``boxplot`` also. If the input is invalid, a ``ValueError`` will be raised.
The number of axes which can be contained by rows x columns specified by ``layout`` must be
larger than the number of required subplots. If layout can contain more axes than required,
@@ -1284,7 +1302,7 @@ or columns needed, given the other.
plt.close('all')
-The above example is identical to using
+The above example is identical to using:
.. ipython:: python
@@ -1298,11 +1316,11 @@ The above example is identical to using
The required number of columns (3) is inferred from the number of series to plot
and the given number of rows (2).
-Also, you can pass multiple axes created beforehand as list-like via ``ax`` keyword.
-This allows to use more complicated layout.
+You can pass multiple axes created beforehand as list-like via ``ax`` keyword.
+This allows more complicated layouts.
The passed axes must be the same number as the subplots being drawn.
-When multiple axes are passed via ``ax`` keyword, ``layout``, ``sharex`` and ``sharey`` keywords
+When multiple axes are passed via the ``ax`` keyword, ``layout``, ``sharex`` and ``sharey`` keywords
don't affect to the output. You should explicitly pass ``sharex=False`` and ``sharey=False``,
otherwise you will see a warning.
@@ -1359,13 +1377,13 @@ Another option is passing an ``ax`` argument to :meth:`Series.plot` to plot on a
Plotting With Error Bars
~~~~~~~~~~~~~~~~~~~~~~~~
-Plotting with error bars is now supported in the :meth:`DataFrame.plot` and :meth:`Series.plot`
+Plotting with error bars is supported in :meth:`DataFrame.plot` and :meth:`Series.plot`.
-Horizontal and vertical errorbars can be supplied to the ``xerr`` and ``yerr`` keyword arguments to :meth:`~DataFrame.plot()`. The error values can be specified using a variety of formats.
+Horizontal and vertical error bars can be supplied to the ``xerr`` and ``yerr`` keyword arguments to :meth:`~DataFrame.plot()`. The error values can be specified using a variety of formats:
-- As a :class:`DataFrame` or ``dict`` of errors with column names matching the ``columns`` attribute of the plotting :class:`DataFrame` or matching the ``name`` attribute of the :class:`Series`
-- As a ``str`` indicating which of the columns of plotting :class:`DataFrame` contain the error values
-- As raw values (``list``, ``tuple``, or ``np.ndarray``). Must be the same length as the plotting :class:`DataFrame`/:class:`Series`
+- As a :class:`DataFrame` or ``dict`` of errors with column names matching the ``columns`` attribute of the plotting :class:`DataFrame` or matching the ``name`` attribute of the :class:`Series`.
+- As a ``str`` indicating which of the columns of plotting :class:`DataFrame` contain the error values.
+- As raw values (``list``, ``tuple``, or ``np.ndarray``). Must be the same length as the plotting :class:`DataFrame`/:class:`Series`.
Asymmetrical error bars are also supported, however raw error values must be provided in this case. For a ``M`` length :class:`Series`, a ``Mx2`` array should be provided indicating lower and upper (or left and right) errors. For a ``MxN`` :class:`DataFrame`, asymmetrical errors should be in a ``Mx2xN`` array.
@@ -1420,7 +1438,10 @@ Plotting with matplotlib table is now supported in :meth:`DataFrame.plot` and :
plt.close('all')
-Also, you can pass different :class:`DataFrame` or :class:`Series` for ``table`` keyword. The data will be drawn as displayed in print method (not transposed automatically). If required, it should be transposed manually as below example.
+Also, you can pass a different :class:`DataFrame` or :class:`Series` to the
+``table`` keyword. The data will be drawn as displayed in print method
+(not transposed automatically). If required, it should be transposed manually
+as seen in the example below.
.. ipython:: python
@@ -1434,7 +1455,10 @@ Also, you can pass different :class:`DataFrame` or :class:`Series` for ``table``
plt.close('all')
-Finally, there is a helper function ``pandas.plotting.table`` to create a table from :class:`DataFrame` and :class:`Series`, and add it to an ``matplotlib.Axes``. This function can accept keywords which matplotlib table has.
+There also exists a helper function ``pandas.plotting.table``, which creates a
+table from :class:`DataFrame` or :class:`Series`, and adds it to an
+``matplotlib.Axes`` instance. This function can accept keywords which the
+matplotlib `table <http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.table>`__ has.
.. ipython:: python
@@ -1461,18 +1485,18 @@ Colormaps
A potential issue when plotting a large number of columns is that it can be
difficult to distinguish some series due to repetition in the default colors. To
-remedy this, DataFrame plotting supports the use of the ``colormap=`` argument,
+remedy this, ``DataFrame`` plotting supports the use of the ``colormap`` argument,
which accepts either a Matplotlib `colormap <http://matplotlib.org/api/cm_api.html>`__
or a string that is a name of a colormap registered with Matplotlib. A
visualization of the default matplotlib colormaps is available `here
-<http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps>`__.
+<https://matplotlib.org/examples/color/colormaps_reference.html>`__.
As matplotlib does not directly support colormaps for line-based plots, the
colors are selected based on an even spacing determined by the number of columns
-in the DataFrame. There is no consideration made for background color, so some
+in the ``DataFrame``. There is no consideration made for background color, so some
colormaps will produce lines that are not easily visible.
-To use the cubehelix colormap, we can simply pass ``'cubehelix'`` to ``colormap=``
+To use the cubehelix colormap, we can pass ``colormap='cubehelix'``.
.. ipython:: python
:suppress:
@@ -1494,7 +1518,7 @@ To use the cubehelix colormap, we can simply pass ``'cubehelix'`` to ``colormap=
plt.close('all')
-or we can pass the colormap itself
+Alternatively, we can pass the colormap itself:
.. ipython:: python
@@ -1565,9 +1589,9 @@ Plotting directly with matplotlib
In some situations it may still be preferable or necessary to prepare plots
directly with matplotlib, for instance when a certain type of plot or
-customization is not (yet) supported by pandas. Series and DataFrame objects
-behave like arrays and can therefore be passed directly to matplotlib functions
-without explicit casts.
+customization is not (yet) supported by pandas. ``Series`` and ``DataFrame``
+objects behave like arrays and can therefore be passed directly to
+matplotlib functions without explicit casts.
pandas also automatically registers formatters and locators that recognize date
indices, thereby extending date and time support to practically all plot types
| Minor changes to the documentation, specifically `categorical.rst` and `visualization.rst`:
* Function references as links.
* Backticks ` `` ` around Series, DataFrame.
* Minor rephrasing of sentences, spelling, etc.
* For plots such as lag plots, Andrews plot, Radviz, I added links to Wikipedia entries.
Comments very welcome.
| https://api.github.com/repos/pandas-dev/pandas/pulls/19428 | 2018-01-28T11:29:28Z | 2018-01-31T23:54:16Z | 2018-01-31T23:54:16Z | 2018-02-01T05:29:38Z |
DOC/ERR: better error message on no common merge keys | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index ca625f492b61f..54dba831f7216 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -667,6 +667,7 @@ Reshaping
- Bug in :func:`DataFrame.stack`, :func:`DataFrame.unstack`, :func:`Series.unstack` which were not returning subclasses (:issue:`15563`)
- Bug in timezone comparisons, manifesting as a conversion of the index to UTC in ``.concat()`` (:issue:`18523`)
- Bug in :func:`concat` when concatting sparse and dense series it returns only a ``SparseDataFrame``. Should be a ``DataFrame``. (:issue:`18914`, :issue:`18686`, and :issue:`16874`)
+- Improved error message for :func:`DataFrame.merge` when there is no common merge key (:issue:`19427`)
-
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 201d8ba427c8a..3d1983f65d70d 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -233,7 +233,7 @@
--------
merge_ordered
merge_asof
-
+DataFrame.join
"""
# -----------------------------------------------------------------------
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 3ec78ce52c6e5..9dbb327e3d956 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1021,7 +1021,12 @@ def _validate_specification(self):
common_cols = self.left.columns.intersection(
self.right.columns)
if len(common_cols) == 0:
- raise MergeError('No common columns to perform merge on')
+ raise MergeError(
+ 'No common columns to perform merge on. '
+ 'Merge options: left_on={lon}, right_on={ron}, '
+ 'left_index={lidx}, right_index={ridx}'
+ .format(lon=self.left_on, ron=self.right_on,
+ lidx=self.left_index, ridx=self.right_index))
if not common_cols.is_unique:
raise MergeError("Data columns not unique: {common!r}"
.format(common=common_cols))
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index f63c206c0c407..32f83ab972be5 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -270,6 +270,14 @@ def test_no_overlap_more_informative_error(self):
df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])
pytest.raises(MergeError, merge, df1, df2)
+ msg = ('No common columns to perform merge on. '
+ 'Merge options: left_on={lon}, right_on={ron}, '
+ 'left_index={lidx}, right_index={ridx}'
+ .format(lon=None, ron=None, lidx=False, ridx=False))
+
+ with tm.assert_raises_regex(MergeError, msg):
+ merge(df1, df2)
+
def test_merge_non_unique_indexes(self):
dt = datetime(2012, 5, 1)
| - [o] closes #19391
- [o] tests passed
- [o] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- Let MergeError emit values keyword arguments
- Add `DataFrame.join` on `See also` section of `pandas.merge`
| https://api.github.com/repos/pandas-dev/pandas/pulls/19427 | 2018-01-28T08:30:26Z | 2018-02-06T14:16:14Z | 2018-02-06T14:16:13Z | 2018-02-06T14:59:55Z |
CLN: GH19404 Changing function signature to match logic | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 6e777281b11e1..0174dc47f2144 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1906,7 +1906,7 @@ def to_pickle(self, path, compression='infer',
return to_pickle(self, path, compression=compression,
protocol=protocol)
- def to_clipboard(self, excel=None, sep=None, **kwargs):
+ def to_clipboard(self, excel=True, sep=None, **kwargs):
"""
Attempt to write text representation of object to the system clipboard
This can be pasted into Excel, for example.
diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py
index 347ec41baf0e1..dcc221ce978b3 100644
--- a/pandas/io/clipboards.py
+++ b/pandas/io/clipboards.py
@@ -63,7 +63,7 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover
return read_table(StringIO(text), sep=sep, **kwargs)
-def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover
+def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover
"""
Attempt to write text representation of object to the system clipboard
The clipboard can be then pasted into Excel for example.
| - [x] closes #19404
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/19425 | 2018-01-27T18:09:39Z | 2018-02-01T13:26:37Z | 2018-02-01T13:26:36Z | 2018-02-01T13:26:40Z |
Remove src/numpy.pxd | diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 996ece063b980..bfea4ff9915ac 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -8,10 +8,14 @@ from cpython.slice cimport PySlice_Check
import numpy as np
cimport numpy as cnp
-from numpy cimport (ndarray, float64_t, int32_t, int64_t, uint8_t, uint64_t,
- NPY_DATETIME, NPY_TIMEDELTA)
+from numpy cimport ndarray, float64_t, int32_t, int64_t, uint8_t, uint64_t
cnp.import_array()
+cdef extern from "numpy/arrayobject.h":
+ # These can be cimported directly from numpy in cython>=0.27.3
+ cdef enum NPY_TYPES:
+ NPY_DATETIME
+ NPY_TIMEDELTA
cimport util
diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx
index f14d508a625d0..b29a2e519efcd 100644
--- a/pandas/_libs/src/inference.pyx
+++ b/pandas/_libs/src/inference.pyx
@@ -12,6 +12,22 @@ iNaT = util.get_nat()
cdef bint PY2 = sys.version_info[0] == 2
cdef double nan = <double> np.NaN
+cdef extern from "numpy/arrayobject.h":
+ # cython's numpy.dtype specification is incorrect, which leads to
+ # errors in issubclass(self.dtype.type, np.bool_), so we directly
+ # include the correct version
+ # https://github.com/cython/cython/issues/2022
+
+ ctypedef class numpy.dtype [object PyArray_Descr]:
+ # Use PyDataType_* macros when possible, however there are no macros
+ # for accessing some of the fields, so some are defined. Please
+ # ask on cython-dev if you need more.
+ cdef int type_num
+ cdef int itemsize "elsize"
+ cdef char byteorder
+ cdef object fields
+ cdef tuple names
+
from util cimport UINT8_MAX, UINT64_MAX, INT64_MAX, INT64_MIN
# core.common import for fast inference checks
@@ -609,13 +625,13 @@ cdef class Validator:
cdef:
Py_ssize_t n
- cnp.dtype dtype
+ dtype dtype
bint skipna
def __cinit__(
self,
Py_ssize_t n,
- cnp.dtype dtype=np.dtype(np.object_),
+ dtype dtype=np.dtype(np.object_),
bint skipna=False
):
self.n = n
@@ -823,7 +839,7 @@ cdef class TemporalValidator(Validator):
def __cinit__(
self,
Py_ssize_t n,
- cnp.dtype dtype=np.dtype(np.object_),
+ dtype dtype=np.dtype(np.object_),
bint skipna=False
):
self.n = n
diff --git a/pandas/_libs/src/numpy.pxd b/pandas/_libs/src/numpy.pxd
deleted file mode 100644
index 8ce398ce218a8..0000000000000
--- a/pandas/_libs/src/numpy.pxd
+++ /dev/null
@@ -1,994 +0,0 @@
-# NumPy static imports for Cython
-#
-# If any of the PyArray_* functions are called, import_array must be
-# called first.
-#
-# This also defines backwards-compatability buffer acquisition
-# code for use in Python 2.x (or Python <= 2.5 when NumPy starts
-# implementing PEP-3118 directly).
-#
-# Because of laziness, the format string of the buffer is statically
-# allocated. Increase the size if this is not enough, or submit a
-# patch to do this properly.
-#
-# Author: Dag Sverre Seljebotn
-#
-
-DEF _buffer_format_string_len = 255
-
-cimport cpython.buffer as pybuf
-from cpython.ref cimport Py_INCREF, Py_XDECREF
-from cpython.object cimport PyObject
-cimport libc.stdlib as stdlib
-cimport libc.stdio as stdio
-
-cdef extern from "Python.h":
- ctypedef int Py_intptr_t
-
-cdef extern from "numpy/arrayobject.h":
- ctypedef Py_intptr_t npy_intp
- ctypedef size_t npy_uintp
-
- cdef enum NPY_TYPES:
- NPY_BOOL
- NPY_BYTE
- NPY_UBYTE
- NPY_SHORT
- NPY_USHORT
- NPY_INT
- NPY_UINT
- NPY_LONG
- NPY_ULONG
- NPY_LONGLONG
- NPY_ULONGLONG
- NPY_FLOAT
- NPY_DOUBLE
- NPY_LONGDOUBLE
- NPY_CFLOAT
- NPY_CDOUBLE
- NPY_CLONGDOUBLE
- NPY_OBJECT
- NPY_STRING
- NPY_UNICODE
- NPY_VOID
- NPY_NTYPES
- NPY_NOTYPE
-
- NPY_INT8
- NPY_INT16
- NPY_INT32
- NPY_INT64
- NPY_INT128
- NPY_INT256
- NPY_UINT8
- NPY_UINT16
- NPY_UINT32
- NPY_UINT64
- NPY_UINT128
- NPY_UINT256
- NPY_FLOAT16
- NPY_FLOAT32
- NPY_FLOAT64
- NPY_FLOAT80
- NPY_FLOAT96
- NPY_FLOAT128
- NPY_FLOAT256
- NPY_COMPLEX32
- NPY_COMPLEX64
- NPY_COMPLEX128
- NPY_COMPLEX160
- NPY_COMPLEX192
- NPY_COMPLEX256
- NPY_COMPLEX512
-
- NPY_DATETIME
- NPY_TIMEDELTA
-
- NPY_INTP
-
- ctypedef enum NPY_ORDER:
- NPY_ANYORDER
- NPY_CORDER
- NPY_FORTRANORDER
-
- ctypedef enum NPY_CLIPMODE:
- NPY_CLIP
- NPY_WRAP
- NPY_RAISE
-
- ctypedef enum NPY_SCALARKIND:
- NPY_NOSCALAR,
- NPY_BOOL_SCALAR,
- NPY_INTPOS_SCALAR,
- NPY_INTNEG_SCALAR,
- NPY_FLOAT_SCALAR,
- NPY_COMPLEX_SCALAR,
- NPY_OBJECT_SCALAR
-
- ctypedef enum NPY_SORTKIND:
- NPY_QUICKSORT
- NPY_HEAPSORT
- NPY_MERGESORT
-
- ctypedef enum NPY_SEARCHSIDE:
- NPY_SEARCHLEFT
- NPY_SEARCHRIGHT
-
- enum:
- NPY_C_CONTIGUOUS
- NPY_F_CONTIGUOUS
- NPY_CONTIGUOUS
- NPY_FORTRAN
- NPY_OWNDATA
- NPY_FORCECAST
- NPY_ENSURECOPY
- NPY_ENSUREARRAY
- NPY_ELEMENTSTRIDES
- NPY_ALIGNED
- NPY_NOTSWAPPED
- NPY_WRITEABLE
- NPY_UPDATEIFCOPY
- NPY_ARR_HAS_DESCR
-
- NPY_BEHAVED
- NPY_BEHAVED_NS
- NPY_CARRAY
- NPY_CARRAY_RO
- NPY_FARRAY
- NPY_FARRAY_RO
- NPY_DEFAULT
-
- NPY_IN_ARRAY
- NPY_OUT_ARRAY
- NPY_INOUT_ARRAY
- NPY_IN_FARRAY
- NPY_OUT_FARRAY
- NPY_INOUT_FARRAY
-
- NPY_UPDATE_ALL
-
- cdef enum:
- NPY_MAXDIMS
-
- npy_intp NPY_MAX_ELSIZE
-
- ctypedef void (*PyArray_VectorUnaryFunc)(
- void *, void *, npy_intp, void *, void *)
-
- ctypedef class numpy.dtype [object PyArray_Descr]:
- # Use PyDataType_* macros when possible, however there are no macros
- # for accessing some of the fields, so some are defined. Please
- # ask on cython-dev if you need more.
- cdef int type_num
- cdef int itemsize "elsize"
- cdef char byteorder
- cdef object fields
- cdef tuple names
-
- ctypedef extern class numpy.flatiter [object PyArrayIterObject]:
- # Use through macros
- pass
-
- ctypedef extern class numpy.broadcast [object PyArrayMultiIterObject]:
- # Use through macros
- pass
-
- ctypedef struct PyArrayObject:
- # For use in situations where ndarray can't replace PyArrayObject*,
- # like PyArrayObject**.
- pass
-
- ctypedef class numpy.ndarray [object PyArrayObject]:
- cdef __cythonbufferdefaults__ = {"mode": "strided"}
-
- cdef:
- # Only taking a few of the most commonly used and stable fields.
- # One should use PyArray_* macros instead to access the C fields.
- char *data
- int ndim "nd"
- npy_intp *shape "dimensions"
- npy_intp *strides
- dtype descr
- PyObject* base
-
- # Note: This syntax (function definition in pxd files) is an
- # experimental exception made for __getbuffer__ and __releasebuffer__
- # -- the details of this may change.
- def __getbuffer__(ndarray self, Py_buffer* info, int flags):
- # This implementation of getbuffer is geared towards Cython
- # requirements, and does not yet fulfill the PEP.
- # In particular strided access is always provided regardless
- # of flags
-
- if info == NULL: return
-
- cdef int copy_shape, i, ndim
- cdef int endian_detector = 1
- cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
-
- ndim = PyArray_NDIM(self)
-
- if sizeof(npy_intp) != sizeof(Py_ssize_t):
- copy_shape = 1
- else:
- copy_shape = 0
-
- if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
- and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
- raise ValueError(u"ndarray is not C contiguous")
-
- if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
- and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
- raise ValueError(u"ndarray is not Fortran contiguous")
-
- info.buf = PyArray_DATA(self)
- info.ndim = ndim
- if copy_shape:
- # Allocate new buffer for strides and shape info.
- # This is allocated as one block, strides first.
- info.strides = <Py_ssize_t*>stdlib.malloc(
- sizeof(Py_ssize_t) * <size_t>ndim * 2)
-
- info.shape = info.strides + ndim
- for i in range(ndim):
- info.strides[i] = PyArray_STRIDES(self)[i]
- info.shape[i] = PyArray_DIMS(self)[i]
- else:
- info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
- info.shape = <Py_ssize_t*>PyArray_DIMS(self)
- info.suboffsets = NULL
- info.itemsize = PyArray_ITEMSIZE(self)
- info.readonly = not PyArray_ISWRITEABLE(self)
-
- cdef int t
- cdef char* f = NULL
- cdef dtype descr = self.descr
- cdef list stack
- cdef int offset
-
- cdef bint hasfields = PyDataType_HASFIELDS(descr)
-
- if not hasfields and not copy_shape:
- # do not call releasebuffer
- info.obj = None
- else:
- # need to call releasebuffer
- info.obj = self
-
- if not hasfields:
- t = descr.type_num
- if ((descr.byteorder == '>' and little_endian) or
- (descr.byteorder == '<' and not little_endian)):
- raise ValueError(u"Non-native byte order not supported")
- if t == NPY_BYTE: f = "b"
- elif t == NPY_UBYTE: f = "B"
- elif t == NPY_SHORT: f = "h"
- elif t == NPY_USHORT: f = "H"
- elif t == NPY_INT: f = "i"
- elif t == NPY_UINT: f = "I"
- elif t == NPY_LONG: f = "l"
- elif t == NPY_ULONG: f = "L"
- elif t == NPY_LONGLONG: f = "q"
- elif t == NPY_ULONGLONG: f = "Q"
- elif t == NPY_FLOAT: f = "f"
- elif t == NPY_DOUBLE: f = "d"
- elif t == NPY_LONGDOUBLE: f = "g"
- elif t == NPY_CFLOAT: f = "Zf"
- elif t == NPY_CDOUBLE: f = "Zd"
- elif t == NPY_CLONGDOUBLE: f = "Zg"
- elif t == NPY_OBJECT: f = "O"
- else:
- raise ValueError(
- u"unknown dtype code in numpy.pxd (%d)" % t)
- info.format = f
- return
- else:
- info.format = <char*>stdlib.malloc(_buffer_format_string_len)
- info.format[0] = '^' # Native data types, manual alignment
- offset = 0
- f = _util_dtypestring(descr, info.format + 1,
- info.format + _buffer_format_string_len,
- &offset)
- f[0] = 0 # Terminate format string
-
- def __releasebuffer__(ndarray self, Py_buffer* info):
- if PyArray_HASFIELDS(self):
- stdlib.free(info.format)
- if sizeof(npy_intp) != sizeof(Py_ssize_t):
- stdlib.free(info.strides)
- # info.shape was stored after info.strides in the same block
-
- ctypedef signed char npy_bool
-
- ctypedef signed char npy_byte
- ctypedef signed short npy_short
- ctypedef signed int npy_int
- ctypedef signed long npy_long
- ctypedef signed long long npy_longlong
-
- ctypedef unsigned char npy_ubyte
- ctypedef unsigned short npy_ushort
- ctypedef unsigned int npy_uint
- ctypedef unsigned long npy_ulong
- ctypedef unsigned long long npy_ulonglong
-
- ctypedef float npy_float
- ctypedef double npy_double
- ctypedef long double npy_longdouble
-
- ctypedef signed char npy_int8
- ctypedef signed short npy_int16
- ctypedef signed int npy_int32
- ctypedef signed long long npy_int64
- ctypedef signed long long npy_int96
- ctypedef signed long long npy_int128
-
- ctypedef unsigned char npy_uint8
- ctypedef unsigned short npy_uint16
- ctypedef unsigned int npy_uint32
- ctypedef unsigned long long npy_uint64
- ctypedef unsigned long long npy_uint96
- ctypedef unsigned long long npy_uint128
-
- ctypedef float npy_float16
- ctypedef float npy_float32
- ctypedef double npy_float64
- ctypedef long double npy_float80
- ctypedef long double npy_float96
- ctypedef long double npy_float128
-
- ctypedef struct npy_cfloat:
- double real
- double imag
-
- ctypedef struct npy_cdouble:
- double real
- double imag
-
- ctypedef struct npy_clongdouble:
- double real
- double imag
-
- ctypedef struct npy_complex64:
- double real
- double imag
-
- ctypedef struct npy_complex128:
- double real
- double imag
-
- ctypedef struct npy_complex160:
- double real
- double imag
-
- ctypedef struct npy_complex192:
- double real
- double imag
-
- ctypedef struct npy_complex256:
- double real
- double imag
-
- ctypedef struct PyArray_Dims:
- npy_intp *ptr
- int len
-
- void import_array()
-
- #
- # Macros from ndarrayobject.h
- #
- bint PyArray_CHKFLAGS(ndarray m, int flags)
- bint PyArray_ISCONTIGUOUS(ndarray m)
- bint PyArray_ISWRITEABLE(ndarray m)
- bint PyArray_ISALIGNED(ndarray m)
-
- int PyArray_NDIM(ndarray)
- bint PyArray_ISONESEGMENT(ndarray)
- bint PyArray_ISFORTRAN(ndarray)
- int PyArray_FORTRANIF(ndarray)
-
- void* PyArray_DATA(ndarray)
- char* PyArray_BYTES(ndarray)
- npy_intp* PyArray_DIMS(ndarray)
- npy_intp* PyArray_STRIDES(ndarray)
- npy_intp PyArray_DIM(ndarray, size_t)
- npy_intp PyArray_STRIDE(ndarray, size_t)
-
- # object PyArray_BASE(ndarray) wrong refcount semantics
- # dtype PyArray_DESCR(ndarray) wrong refcount semantics
- int PyArray_FLAGS(ndarray)
- npy_intp PyArray_ITEMSIZE(ndarray)
- int PyArray_TYPE(ndarray arr)
-
- object PyArray_GETITEM(ndarray arr, void *itemptr)
- int PyArray_SETITEM(ndarray arr, void *itemptr, object obj)
-
- bint PyTypeNum_ISBOOL(int)
- bint PyTypeNum_ISUNSIGNED(int)
- bint PyTypeNum_ISSIGNED(int)
- bint PyTypeNum_ISINTEGER(int)
- bint PyTypeNum_ISFLOAT(int)
- bint PyTypeNum_ISNUMBER(int)
- bint PyTypeNum_ISSTRING(int)
- bint PyTypeNum_ISCOMPLEX(int)
- bint PyTypeNum_ISPYTHON(int)
- bint PyTypeNum_ISFLEXIBLE(int)
- bint PyTypeNum_ISUSERDEF(int)
- bint PyTypeNum_ISEXTENDED(int)
- bint PyTypeNum_ISOBJECT(int)
-
- bint PyDataType_ISBOOL(dtype)
- bint PyDataType_ISUNSIGNED(dtype)
- bint PyDataType_ISSIGNED(dtype)
- bint PyDataType_ISINTEGER(dtype)
- bint PyDataType_ISFLOAT(dtype)
- bint PyDataType_ISNUMBER(dtype)
- bint PyDataType_ISSTRING(dtype)
- bint PyDataType_ISCOMPLEX(dtype)
- bint PyDataType_ISPYTHON(dtype)
- bint PyDataType_ISFLEXIBLE(dtype)
- bint PyDataType_ISUSERDEF(dtype)
- bint PyDataType_ISEXTENDED(dtype)
- bint PyDataType_ISOBJECT(dtype)
- bint PyDataType_HASFIELDS(dtype)
-
- bint PyArray_ISBOOL(ndarray)
- bint PyArray_ISUNSIGNED(ndarray)
- bint PyArray_ISSIGNED(ndarray)
- bint PyArray_ISINTEGER(ndarray)
- bint PyArray_ISFLOAT(ndarray)
- bint PyArray_ISNUMBER(ndarray)
- bint PyArray_ISSTRING(ndarray)
- bint PyArray_ISCOMPLEX(ndarray)
- bint PyArray_ISPYTHON(ndarray)
- bint PyArray_ISFLEXIBLE(ndarray)
- bint PyArray_ISUSERDEF(ndarray)
- bint PyArray_ISEXTENDED(ndarray)
- bint PyArray_ISOBJECT(ndarray)
- bint PyArray_HASFIELDS(ndarray)
-
- bint PyArray_ISVARIABLE(ndarray)
-
- bint PyArray_SAFEALIGNEDCOPY(ndarray)
- bint PyArray_ISNBO(ndarray)
- bint PyArray_IsNativeByteOrder(ndarray)
- bint PyArray_ISNOTSWAPPED(ndarray)
- bint PyArray_ISBYTESWAPPED(ndarray)
-
- bint PyArray_FLAGSWAP(ndarray, int)
-
- bint PyArray_ISCARRAY(ndarray)
- bint PyArray_ISCARRAY_RO(ndarray)
- bint PyArray_ISFARRAY(ndarray)
- bint PyArray_ISFARRAY_RO(ndarray)
- bint PyArray_ISBEHAVED(ndarray)
- bint PyArray_ISBEHAVED_RO(ndarray)
-
- bint PyDataType_ISNOTSWAPPED(dtype)
- bint PyDataType_ISBYTESWAPPED(dtype)
-
- bint PyArray_DescrCheck(object)
-
- bint PyArray_Check(object)
- bint PyArray_CheckExact(object)
-
- # Cannot be supported due to out arg:
- # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&)
- # bint PyArray_HasArrayInterface(op, out)
-
- bint PyArray_IsZeroDim(object)
- # Cannot be supported due to ## ## in macro:
- # bint PyArray_IsScalar(object, verbatim work)
- bint PyArray_CheckScalar(object)
- bint PyArray_IsPythonNumber(object)
- bint PyArray_IsPythonScalar(object)
- bint PyArray_IsAnyScalar(object)
- bint PyArray_CheckAnyScalar(object)
- ndarray PyArray_GETCONTIGUOUS(ndarray)
- bint PyArray_SAMESHAPE(ndarray, ndarray)
- npy_intp PyArray_SIZE(ndarray)
- npy_intp PyArray_NBYTES(ndarray)
-
- object PyArray_FROM_O(object)
- object PyArray_FROM_OF(object m, int flags)
- bint PyArray_FROM_OT(object m, int type)
- bint PyArray_FROM_OTF(object m, int type, int flags)
- object PyArray_FROMANY(object m, int type, int min, int max, int flags)
- object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran)
- object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran)
- void PyArray_FILLWBYTE(object, int val)
- npy_intp PyArray_REFCOUNT(object)
- object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth)
- unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2)
- bint PyArray_EquivByteorders(int b1, int b2)
- object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum)
- object PyArray_SimpleNewFromData(int nd, npy_intp* dims,
- int typenum, void* data)
- #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr)
- object PyArray_ToScalar(void* data, ndarray arr)
-
- void* PyArray_GETPTR1(ndarray m, npy_intp i)
- void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j)
- void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k)
- void* PyArray_GETPTR4(ndarray m, npy_intp i,
- npy_intp j, npy_intp k, npy_intp l)
-
- void PyArray_XDECREF_ERR(ndarray)
- # Cannot be supported due to out arg
- # void PyArray_DESCR_REPLACE(descr)
-
- object PyArray_Copy(ndarray)
- object PyArray_FromObject(object op, int type,
- int min_depth, int max_depth)
- object PyArray_ContiguousFromObject(object op, int type,
- int min_depth, int max_depth)
- object PyArray_CopyFromObject(object op, int type,
- int min_depth, int max_depth)
-
- object PyArray_Cast(ndarray mp, int type_num)
- object PyArray_Take(ndarray ap, object items, int axis)
- object PyArray_Put(ndarray ap, object items, object values)
-
- void PyArray_ITER_RESET(flatiter it) nogil
- void PyArray_ITER_NEXT(flatiter it) nogil
- void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil
- void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil
- void* PyArray_ITER_DATA(flatiter it) nogil
- bint PyArray_ITER_NOTDONE(flatiter it) nogil
-
- void PyArray_MultiIter_RESET(broadcast multi) nogil
- void PyArray_MultiIter_NEXT(broadcast multi) nogil
- void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil
- void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil
- void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil
- void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil
- bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil
-
- # Functions from __multiarray_api.h
-
- # Functions taking dtype and returning object/ndarray are disabled
- # for now as they steal dtype references. I'm conservative and disable
- # more than is probably needed until it can be checked further.
- int PyArray_SetNumericOps (object)
- object PyArray_GetNumericOps ()
- int PyArray_INCREF (ndarray)
- int PyArray_XDECREF (ndarray)
- void PyArray_SetStringFunction (object, int)
- dtype PyArray_DescrFromType (int)
- object PyArray_TypeObjectFromType (int)
- char * PyArray_Zero (ndarray)
- char * PyArray_One (ndarray)
- #object PyArray_CastToType (ndarray, dtype, int)
- int PyArray_CastTo (ndarray, ndarray)
- int PyArray_CastAnyTo (ndarray, ndarray)
- int PyArray_CanCastSafely (int, int)
- npy_bool PyArray_CanCastTo (dtype, dtype)
- int PyArray_ObjectType (object, int)
- dtype PyArray_DescrFromObject (object, dtype)
- #ndarray* PyArray_ConvertToCommonType (object, int *)
- dtype PyArray_DescrFromScalar (object)
- dtype PyArray_DescrFromTypeObject (object)
- npy_intp PyArray_Size (object)
- #object PyArray_Scalar (void *, dtype, object)
- #object PyArray_FromScalar (object, dtype)
- void PyArray_ScalarAsCtype (object, void *)
- #int PyArray_CastScalarToCtype (object, void *, dtype)
- #int PyArray_CastScalarDirect (object, dtype, void *, int)
- object PyArray_ScalarFromObject (object)
- #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int)
- object PyArray_FromDims (int, int *, int)
- #object PyArray_FromDimsAndDataAndDescr (int, int *, dtype, char *)
- #object PyArray_FromAny (object, dtype, int, int, int, object)
- object PyArray_EnsureArray (object)
- object PyArray_EnsureAnyArray (object)
- #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *)
- #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *)
- #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp)
- #object PyArray_FromIter (object, dtype, npy_intp)
- object PyArray_Return (ndarray)
- #object PyArray_GetField (ndarray, dtype, int)
- #int PyArray_SetField (ndarray, dtype, int, object)
- object PyArray_Byteswap (ndarray, npy_bool)
- object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER)
- int PyArray_MoveInto (ndarray, ndarray)
- int PyArray_CopyInto (ndarray, ndarray)
- int PyArray_CopyAnyInto (ndarray, ndarray)
- int PyArray_CopyObject (ndarray, object)
- object PyArray_NewCopy (ndarray, NPY_ORDER)
- object PyArray_ToList (ndarray)
- object PyArray_ToString (ndarray, NPY_ORDER)
- int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *)
- int PyArray_Dump (object, object, int)
- object PyArray_Dumps (object, int)
- int PyArray_ValidType (int)
- void PyArray_UpdateFlags (ndarray, int)
- object PyArray_New (type, int, npy_intp *, int, npy_intp *,
- void *, int, int, object)
- #dtype PyArray_DescrNew (dtype)
- dtype PyArray_DescrNewFromType (int)
- double PyArray_GetPriority (object, double)
- object PyArray_IterNew (object)
- object PyArray_MultiIterNew (int, ...)
-
- int PyArray_PyIntAsInt (object)
- npy_intp PyArray_PyIntAsIntp (object)
- int PyArray_Broadcast (broadcast)
- void PyArray_FillObjectArray (ndarray, object)
- int PyArray_FillWithScalar (ndarray, object)
- npy_bool PyArray_CheckStrides (
- int, int, npy_intp, npy_intp, npy_intp *, npy_intp *)
- dtype PyArray_DescrNewByteorder (dtype, char)
- object PyArray_IterAllButAxis (object, int *)
- #object PyArray_CheckFromAny (object, dtype, int, int, int, object)
- #object PyArray_FromArray (ndarray, dtype, int)
- object PyArray_FromInterface (object)
- object PyArray_FromStructInterface (object)
- #object PyArray_FromArrayAttr (object, dtype, object)
- #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*)
- int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND)
- object PyArray_NewFlagsObject (object)
- npy_bool PyArray_CanCastScalar (type, type)
- #int PyArray_CompareUCS4 (npy_ucs4 *, npy_ucs4 *, register size_t)
- int PyArray_RemoveSmallest (broadcast)
- int PyArray_ElementStrides (object)
- void PyArray_Item_INCREF (char *, dtype)
- void PyArray_Item_XDECREF (char *, dtype)
- object PyArray_FieldNames (object)
- object PyArray_Transpose (ndarray, PyArray_Dims *)
- object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE)
- object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE)
- object PyArray_PutMask (ndarray, object, object)
- object PyArray_Repeat (ndarray, object, int)
- object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE)
- int PyArray_Sort (ndarray, int, NPY_SORTKIND)
- object PyArray_ArgSort (ndarray, int, NPY_SORTKIND)
- object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE)
- object PyArray_ArgMax (ndarray, int, ndarray)
- object PyArray_ArgMin (ndarray, int, ndarray)
- object PyArray_Reshape (ndarray, object)
- object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER)
- object PyArray_Squeeze (ndarray)
- #object PyArray_View (ndarray, dtype, type)
- object PyArray_SwapAxes (ndarray, int, int)
- object PyArray_Max (ndarray, int, ndarray)
- object PyArray_Min (ndarray, int, ndarray)
- object PyArray_Ptp (ndarray, int, ndarray)
- object PyArray_Mean (ndarray, int, int, ndarray)
- object PyArray_Trace (ndarray, int, int, int, int, ndarray)
- object PyArray_Diagonal (ndarray, int, int, int)
- object PyArray_Clip (ndarray, object, object, ndarray)
- object PyArray_Conjugate (ndarray, ndarray)
- object PyArray_Nonzero (ndarray)
- object PyArray_Std (ndarray, int, int, ndarray, int)
- object PyArray_Sum (ndarray, int, int, ndarray)
- object PyArray_CumSum (ndarray, int, int, ndarray)
- object PyArray_Prod (ndarray, int, int, ndarray)
- object PyArray_CumProd (ndarray, int, int, ndarray)
- object PyArray_All (ndarray, int, ndarray)
- object PyArray_Any (ndarray, int, ndarray)
- object PyArray_Compress (ndarray, object, int, ndarray)
- object PyArray_Flatten (ndarray, NPY_ORDER)
- object PyArray_Ravel (ndarray, NPY_ORDER)
- npy_intp PyArray_MultiplyList (npy_intp *, int)
- int PyArray_MultiplyIntList (int *, int)
- void * PyArray_GetPtr (ndarray, npy_intp*)
- int PyArray_CompareLists (npy_intp *, npy_intp *, int)
- #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype)
- #int PyArray_As1D (object*, char **, int *, int)
- #int PyArray_As2D (object*, char ***, int *, int *, int)
- int PyArray_Free (object, void *)
- #int PyArray_Converter (object, object*)
- int PyArray_IntpFromSequence (object, npy_intp *, int)
- object PyArray_Concatenate (object, int)
- object PyArray_InnerProduct (object, object)
- object PyArray_MatrixProduct (object, object)
- object PyArray_CopyAndTranspose (object)
- object PyArray_Correlate (object, object, int)
- int PyArray_TypestrConvert (int, int)
- #int PyArray_DescrConverter (object, dtype*)
- #int PyArray_DescrConverter2 (object, dtype*)
- int PyArray_IntpConverter (object, PyArray_Dims *)
- #int PyArray_BufferConverter (object, chunk)
- int PyArray_AxisConverter (object, int *)
- int PyArray_BoolConverter (object, npy_bool *)
- int PyArray_ByteorderConverter (object, char *)
- int PyArray_OrderConverter (object, NPY_ORDER *)
- unsigned char PyArray_EquivTypes (dtype, dtype)
- #object PyArray_Zeros (int, npy_intp *, dtype, int)
- #object PyArray_Empty (int, npy_intp *, dtype, int)
- object PyArray_Where (object, object, object)
- object PyArray_Arange (double, double, double, int)
- #object PyArray_ArangeObj (object, object, object, dtype)
- int PyArray_SortkindConverter (object, NPY_SORTKIND *)
- object PyArray_LexSort (object, int)
- object PyArray_Round (ndarray, int, ndarray)
- unsigned char PyArray_EquivTypenums (int, int)
- int PyArray_RegisterDataType (dtype)
- int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *)
- int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND)
- #void PyArray_InitArrFuncs (PyArray_ArrFuncs *)
- object PyArray_IntTupleFromIntp (int, npy_intp *)
- int PyArray_TypeNumFromName (char *)
- int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *)
- #int PyArray_OutputConverter (object, ndarray*)
- object PyArray_BroadcastToShape (object, npy_intp *, int)
- void _PyArray_SigintHandler (int)
- void* _PyArray_GetSigintBuf ()
- #int PyArray_DescrAlignConverter (object, dtype*)
- #int PyArray_DescrAlignConverter2 (object, dtype*)
- int PyArray_SearchsideConverter (object, void *)
- object PyArray_CheckAxis (ndarray, int *, int)
- npy_intp PyArray_OverflowMultiplyList (npy_intp *, int)
- int PyArray_CompareString (char *, char *, size_t)
-
-
-# Typedefs that matches the runtime dtype objects in
-# the numpy module.
-
-# The ones that are commented out needs an IFDEF function
-# in Cython to enable them only on the right systems.
-
-ctypedef npy_int8 int8_t
-ctypedef npy_int16 int16_t
-ctypedef npy_int32 int32_t
-ctypedef npy_int64 int64_t
-#ctypedef npy_int96 int96_t
-#ctypedef npy_int128 int128_t
-
-ctypedef npy_uint8 uint8_t
-ctypedef npy_uint16 uint16_t
-ctypedef npy_uint32 uint32_t
-ctypedef npy_uint64 uint64_t
-#ctypedef npy_uint96 uint96_t
-#ctypedef npy_uint128 uint128_t
-
-ctypedef npy_float16 float16_t
-ctypedef npy_float32 float32_t
-ctypedef npy_float64 float64_t
-#ctypedef npy_float80 float80_t
-#ctypedef npy_float128 float128_t
-
-ctypedef float complex complex64_t
-ctypedef double complex complex128_t
-
-# The int types are mapped a bit surprising --
-# numpy.int corresponds to 'l' and numpy.long to 'q'
-ctypedef npy_long int_t
-ctypedef npy_longlong long_t
-ctypedef npy_longlong longlong_t
-
-ctypedef npy_ulong uint_t
-ctypedef npy_ulonglong ulong_t
-ctypedef npy_ulonglong ulonglong_t
-
-ctypedef npy_intp intp_t
-ctypedef npy_uintp uintp_t
-
-ctypedef npy_double float_t
-ctypedef npy_double double_t
-ctypedef npy_longdouble longdouble_t
-
-ctypedef npy_cfloat cfloat_t
-ctypedef npy_cdouble cdouble_t
-ctypedef npy_clongdouble clongdouble_t
-
-ctypedef npy_cdouble complex_t
-
-cdef inline object PyArray_MultiIterNew1(a):
- return PyArray_MultiIterNew(1, <void*>a)
-
-cdef inline object PyArray_MultiIterNew2(a, b):
- return PyArray_MultiIterNew(2, <void*>a, <void*>b)
-
-cdef inline object PyArray_MultiIterNew3(a, b, c):
- return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
-
-cdef inline object PyArray_MultiIterNew4(a, b, c, d):
- return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
-
-cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
- return PyArray_MultiIterNew(5, <void*>a, <void*>b,
- <void*>c, <void*> d, <void*> e)
-
-cdef inline char* _util_dtypestring(dtype descr, char* f,
- char* end, int* offset) except NULL:
- # Recursive utility function used in __getbuffer__ to get format
- # string. The new location in the format string is returned.
-
- cdef dtype child
- cdef int delta_offset
- cdef tuple i
- cdef int endian_detector = 1
- cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
- cdef tuple fields
-
- for childname in descr.names:
- fields = descr.fields[childname]
- child, new_offset = fields
-
- if (end - f) - (new_offset - offset[0]) < 15:
- raise RuntimeError(
- u"Format string allocated too short, see comment in numpy.pxd")
-
- if ((child.byteorder == '>' and little_endian) or
- (child.byteorder == '<' and not little_endian)):
- raise ValueError(u"Non-native byte order not supported")
- # One could encode it in the format string and have Cython
- # complain instead, BUT: < and > in format strings also imply
- # standardized sizes for datatypes, and we rely on native in
- # order to avoid reencoding data types based on their size.
- #
- # A proper PEP 3118 exporter for other clients than Cython
- # must deal properly with this!
-
- # Output padding bytes
- while offset[0] < new_offset:
- f[0] = 120 # "x"; pad byte
- f += 1
- offset[0] += 1
-
- offset[0] += child.itemsize
-
- if not PyDataType_HASFIELDS(child):
- t = child.type_num
- if end - f < 5:
- raise RuntimeError(u"Format string allocated too short.")
-
- # Until ticket #99 is fixed, use integers to avoid warnings
- if t == NPY_BYTE: f[0] = 98 #"b"
- elif t == NPY_UBYTE: f[0] = 66 #"B"
- elif t == NPY_SHORT: f[0] = 104 #"h"
- elif t == NPY_USHORT: f[0] = 72 #"H"
- elif t == NPY_INT: f[0] = 105 #"i"
- elif t == NPY_UINT: f[0] = 73 #"I"
- elif t == NPY_LONG: f[0] = 108 #"l"
- elif t == NPY_ULONG: f[0] = 76 #"L"
- elif t == NPY_LONGLONG: f[0] = 113 #"q"
- elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
- elif t == NPY_FLOAT: f[0] = 102 #"f"
- elif t == NPY_DOUBLE: f[0] = 100 #"d"
- elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
- elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
- elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
- elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
- elif t == NPY_OBJECT: f[0] = 79 #"O"
- else:
- raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
- f += 1
- else:
- # Cython ignores struct boundary information ("T{...}"),
- # so don't output it
- f = _util_dtypestring(child, f, end, offset)
- return f
-
-
-#
-# ufunc API
-#
-
-cdef extern from "numpy/ufuncobject.h":
-
- ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *,
- npy_intp *, void *)
-
- ctypedef extern class numpy.ufunc [object PyUFuncObject]:
- cdef:
- int nin, nout, nargs
- int identity
- PyUFuncGenericFunction *functions
- void **data
- int ntypes
- int check_return
- char *name
- char *types
- char *doc
- void *ptr
- PyObject *obj
- PyObject *userloops
-
- cdef enum:
- PyUFunc_Zero
- PyUFunc_One
- PyUFunc_None
- UFUNC_ERR_IGNORE
- UFUNC_ERR_WARN
- UFUNC_ERR_RAISE
- UFUNC_ERR_CALL
- UFUNC_ERR_PRINT
- UFUNC_ERR_LOG
- UFUNC_MASK_DIVIDEBYZERO
- UFUNC_MASK_OVERFLOW
- UFUNC_MASK_UNDERFLOW
- UFUNC_MASK_INVALID
- UFUNC_SHIFT_DIVIDEBYZERO
- UFUNC_SHIFT_OVERFLOW
- UFUNC_SHIFT_UNDERFLOW
- UFUNC_SHIFT_INVALID
- UFUNC_FPE_DIVIDEBYZERO
- UFUNC_FPE_OVERFLOW
- UFUNC_FPE_UNDERFLOW
- UFUNC_FPE_INVALID
- UFUNC_ERR_DEFAULT
- UFUNC_ERR_DEFAULT2
-
- object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *,
- void **, char *, int, int, int, int, char *, char *, int)
- int PyUFunc_RegisterLoopForType(ufunc, int,
- PyUFuncGenericFunction, int *, void *)
- int PyUFunc_GenericFunction \
- (ufunc, PyObject *, PyObject *, PyArrayObject **)
- void PyUFunc_f_f_As_d_d \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_d_d \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_f_f \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_g_g \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_F_F_As_D_D \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_F_F \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_D_D \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_G_G \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_O_O \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_ff_f_As_dd_d \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_ff_f \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_dd_d \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_gg_g \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_FF_F_As_DD_D \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_DD_D \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_FF_F \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_GG_G \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_OO_O \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_O_O_method \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_OO_O_method \
- (char **, npy_intp *, npy_intp *, void *)
- void PyUFunc_On_Om \
- (char **, npy_intp *, npy_intp *, void *)
- int PyUFunc_GetPyValues \
- (char *, int *, int *, PyObject **)
- int PyUFunc_checkfperr \
- (int, PyObject *, int *)
- void PyUFunc_clearfperr()
- int PyUFunc_getfperr()
- int PyUFunc_handlefperr \
- (int, PyObject *, int, int *)
- int PyUFunc_ReplaceLoopBySignature \
- (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)
- object PyUFunc_FromFuncAndDataAndSignature \
- (PyUFuncGenericFunction *, void **, char *, int, int, int,
- int, char *, char *, int, char *)
-
- void import_ufunc()
-
-
-cdef inline void set_array_base(ndarray arr, object base):
- cdef PyObject* baseptr
- if base is None:
- baseptr = NULL
- else:
- Py_INCREF(base) # important to do this before decref below!
- baseptr = <PyObject*>base
- Py_XDECREF(arr.base)
- arr.base = baseptr
-
-cdef inline object get_array_base(ndarray arr):
- if arr.base is NULL:
- return None
- else:
- return <object>arr.base
| The existence of the src/numpy.pxd file causes ambiguity as to where cimports are coming from. src/numpy.pxd? cython's numpy/__init__.pxd? What about the np.get_include() uses in setup.py?
This PR gets rid of the unnecessary file by picking out the one definition that is actually needed. | https://api.github.com/repos/pandas-dev/pandas/pulls/19418 | 2018-01-27T00:29:53Z | 2018-01-27T16:33:25Z | 2018-01-27T16:33:25Z | 2018-01-27T18:29:05Z |
Remove unused files from src/klib | diff --git a/pandas/_libs/src/klib/ktypes.h b/pandas/_libs/src/klib/ktypes.h
deleted file mode 100644
index 981f17372a2d5..0000000000000
--- a/pandas/_libs/src/klib/ktypes.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __KTYPES_H
-#define __KTYPES_H
-
-/* compipler specific configuration */
-
-#endif /* __KTYPES_H */
diff --git a/pandas/_libs/src/klib/kvec.h b/pandas/_libs/src/klib/kvec.h
deleted file mode 100644
index c5e6e6c407dfc..0000000000000
--- a/pandas/_libs/src/klib/kvec.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/* The MIT License
-
- Copyright (c) 2008, by Attractive Chaos <attractor@live.co.uk>
-
- Permission is hereby granted, free of charge, to any person obtaining
- a copy of this software and associated documentation files (the
- "Software"), to deal in the Software without restriction, including
- without limitation the rights to use, copy, modify, merge, publish,
- distribute, sublicense, and/or sell copies of the Software, and to
- permit persons to whom the Software is furnished to do so, subject to
- the following conditions:
-
- The above copyright notice and this permission notice shall be
- included in all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
-*/
-
-/*
- An example:
-
-#include "kvec.h"
-int main() {
- kvec_t(int) array;
- kv_init(array);
- kv_push(int, array, 10); // append
- kv_a(int, array, 20) = 5; // dynamic
- kv_A(array, 20) = 4; // static
- kv_destroy(array);
- return 0;
-}
-*/
-
-/*
- 2008-09-22 (0.1.0):
-
- * The initial version.
-
-*/
-
-#ifndef AC_KVEC_H
-#define AC_KVEC_H
-
-#include <stdlib.h>
-#include <Python.h>
-#include <numpy/ndarraytypes.h>
-
-#ifndef PANDAS_INLINE
- #if defined(__GNUC__)
- #define PANDAS_INLINE static __inline__
- #elif defined(_MSC_VER)
- #define PANDAS_INLINE static __inline
- #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
- #define PANDAS_INLINE static inline
- #else
- #define PANDAS_INLINE
- #endif
-#endif
-
-#define kv_roundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x))
-
-#define kvec_t(type) struct { size_t n, m; type *a; }
-#define kv_init(v) ((v).n = (v).m = 0, (v).a = 0)
-#define kv_destroy(v) free((v).a)
-#define kv_A(v, i) ((v).a[(i)])
-#define kv_pop(v) ((v).a[--(v).n])
-#define kv_size(v) ((v).n)
-#define kv_max(v) ((v).m)
-
-#define kv_resize(type, v, s) ((v).m = (s), (v).a = (type*)realloc((v).a, sizeof(type) * (v).m))
-
-#define kv_copy(type, v1, v0) do { \
- if ((v1).m < (v0).n) kv_resize(type, v1, (v0).n); \
- (v1).n = (v0).n; \
- memcpy((v1).a, (v0).a, sizeof(type) * (v0).n); \
- } while (0) \
-
-#define kv_push(type, v, x) do { \
- if ((v)->n == (v)->m) { \
- (v)->m = (v)->m? (v)->m<<1 : 2; \
- (v)->a = (type*)realloc((v)->a, sizeof(type) * (v)->m); \
- } \
- (v)->a[(v)->n++] = (x); \
- } while (0)
-
-#define kv_pushp(type, v) (((v).n == (v).m)? \
- ((v).m = ((v).m? (v).m<<1 : 2), \
- (v).a = (type*)realloc((v).a, sizeof(type) * (v).m), 0) \
- : 0), ((v).a + ((v).n++))
-
-#define kv_a(type, v, i) ((v).m <= (size_t)(i)? \
- ((v).m = (v).n = (i) + 1, kv_roundup32((v).m), \
- (v).a = (type*)realloc((v).a, sizeof(type) * (v).m), 0) \
- : (v).n <= (size_t)(i)? (v).n = (i) \
- : 0), (v).a[(i)]
-
-// #define kv_int64_push(v, x) (kv_push(int64_t, (v), (x)))
-
-typedef struct {
- size_t n, m;
- int64_t* a;
-} kv_int64_t;
-
-typedef struct {
- size_t n, m;
- double* a;
-} kv_double;
-
-typedef struct {
- size_t n, m;
- PyObject** a;
-} kv_object_t;
-
-void PANDAS_INLINE kv_object_push(kv_object_t *v, PyObject *x) {
- do {
- if (v->n == v->m) {
- v->m = v->m? v->m<<1 : 2;
- v->a = (PyObject**)realloc(v->a, sizeof(PyObject*) * v->m);
- }
- v->a[v->n++] = x;
- } while (0);
- // kv_push(PyObject*, v, x);
- Py_INCREF(x);
-}
-
-void PANDAS_INLINE kv_int64_push(kv_int64_t *v, int64_t x) {
- kv_push(int64_t, v, x);
-}
-
-void PANDAS_INLINE kv_double_push(kv_double *v, double x) {
- kv_push(double, v, x);
-}
-
-void PANDAS_INLINE kv_object_destroy(kv_object_t *v) {
- int i;
- for (i = 0; i < v->n; ++i)
- {
- Py_XDECREF(v->a[i]);
- }
- free(v->a);
-}
-
-
-#endif
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index 61e3752a49639..e7f334b267461 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -44,7 +44,6 @@ Numeric decoder derived from from TCL library
#include <numpy/arrayobject.h> // NOLINT(build/include_order)
#include <numpy/arrayscalars.h> // NOLINT(build/include_order)
#include <numpy/npy_math.h> // NOLINT(build/include_order)
-#include <numpy_helper.h> // NOLINT(build/include_order)
#include <stdio.h> // NOLINT(build/include_order)
#include <ultrajson.h> // NOLINT(build/include_order)
#include <np_datetime.h> // NOLINT(build/include_order)
@@ -60,6 +59,8 @@ static PyTypeObject *cls_series;
static PyTypeObject *cls_index;
static PyTypeObject *cls_nat;
+npy_int64 get_nat(void) { return NPY_MIN_INT64; }
+
typedef void *(*PFN_PyTypeToJSON)(JSOBJ obj, JSONTypeContext *ti,
void *outValue, size_t *_outLen);
diff --git a/setup.py b/setup.py
index 7ade1544ec5cd..859d50303ecb1 100755
--- a/setup.py
+++ b/setup.py
@@ -694,10 +694,9 @@ def pxd(name):
'pandas/_libs/src/ujson/lib/ultrajsonenc.c',
'pandas/_libs/src/ujson/lib/ultrajsondec.c'] +
np_datetime_sources),
- include_dirs=(['pandas/_libs/src/ujson/python',
- 'pandas/_libs/src/ujson/lib',
- 'pandas/_libs/src/datetime'] +
- common_include),
+ include_dirs=['pandas/_libs/src/ujson/python',
+ 'pandas/_libs/src/ujson/lib',
+ 'pandas/_libs/src/datetime'],
extra_compile_args=(['-D_GNU_SOURCE'] +
extra_compile_args))
| De-couple ujson from numpy_helper | https://api.github.com/repos/pandas-dev/pandas/pulls/19415 | 2018-01-26T18:45:41Z | 2018-01-27T01:06:43Z | 2018-01-27T01:06:43Z | 2018-01-31T06:49:26Z |
Refactor out libwriters, fix references to Timestamp, Timedelta | diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index c3a654b01022c..e1d59f807a7fd 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -21,14 +21,7 @@ from cpython cimport (Py_INCREF, PyTuple_SET_ITEM,
PyBytes_Check,
PyUnicode_Check,
PyTuple_New,
- PyObject_RichCompareBool,
- PyBytes_GET_SIZE,
- PyUnicode_GET_SIZE)
-
-try:
- from cpython cimport PyString_GET_SIZE
-except ImportError:
- from cpython cimport PyUnicode_GET_SIZE as PyString_GET_SIZE
+ PyObject_RichCompareBool)
cimport cpython
@@ -38,7 +31,7 @@ from cpython.datetime cimport (PyDateTime_Check, PyDate_Check,
PyDateTime_IMPORT)
PyDateTime_IMPORT
-from tslib import NaT, Timestamp, Timedelta, array_to_datetime
+from tslib import NaT, array_to_datetime
from missing cimport checknull
@@ -127,28 +120,6 @@ def item_from_zerodim(object val):
return util.unbox_if_zerodim(val)
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def fast_unique(ndarray[object] values):
- cdef:
- Py_ssize_t i, n = len(values)
- list uniques = []
- dict table = {}
- object val, stub = 0
-
- for i from 0 <= i < n:
- val = values[i]
- if val not in table:
- table[val] = stub
- uniques.append(val)
- try:
- uniques.sort()
- except Exception:
- pass
-
- return uniques
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
def fast_unique_multiple(list arrays):
@@ -368,30 +339,6 @@ def has_infs_f8(ndarray[float64_t] arr):
return False
-def convert_timestamps(ndarray values):
- cdef:
- object val, f, result
- dict cache = {}
- Py_ssize_t i, n = len(values)
- ndarray[object] out
-
- # for HDFStore, a bit temporary but...
-
- from datetime import datetime
- f = datetime.fromtimestamp
-
- out = np.empty(n, dtype='O')
-
- for i in range(n):
- val = util.get_value_1d(values, i)
- if val in cache:
- out[i] = cache[val]
- else:
- cache[val] = out[i] = f(val)
-
- return out
-
-
def maybe_indices_to_slice(ndarray[int64_t] indices, int max_len):
cdef:
Py_ssize_t i, n = len(indices)
@@ -731,145 +678,6 @@ def clean_index_list(list obj):
return np.asarray(obj), 0
-ctypedef fused pandas_string:
- str
- unicode
- bytes
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-cpdef Py_ssize_t max_len_string_array(pandas_string[:] arr):
- """ return the maximum size of elements in a 1-dim string array """
- cdef:
- Py_ssize_t i, m = 0, l = 0, length = arr.shape[0]
- pandas_string v
-
- for i in range(length):
- v = arr[i]
- if PyString_Check(v):
- l = PyString_GET_SIZE(v)
- elif PyBytes_Check(v):
- l = PyBytes_GET_SIZE(v)
- elif PyUnicode_Check(v):
- l = PyUnicode_GET_SIZE(v)
-
- if l > m:
- m = l
-
- return m
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def string_array_replace_from_nan_rep(
- ndarray[object, ndim=1] arr, object nan_rep,
- object replace=None):
- """
- Replace the values in the array with 'replacement' if
- they are 'nan_rep'. Return the same array.
- """
-
- cdef int length = arr.shape[0], i = 0
- if replace is None:
- replace = np.nan
-
- for i from 0 <= i < length:
- if arr[i] == nan_rep:
- arr[i] = replace
-
- return arr
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def convert_json_to_lines(object arr):
- """
- replace comma separated json with line feeds, paying special attention
- to quotes & brackets
- """
- cdef:
- Py_ssize_t i = 0, num_open_brackets_seen = 0, length
- bint in_quotes = 0, is_escaping = 0
- ndarray[uint8_t] narr
- unsigned char v, comma, left_bracket, right_brack, newline
-
- newline = ord('\n')
- comma = ord(',')
- left_bracket = ord('{')
- right_bracket = ord('}')
- quote = ord('"')
- backslash = ord('\\')
-
- narr = np.frombuffer(arr.encode('utf-8'), dtype='u1').copy()
- length = narr.shape[0]
- for i in range(length):
- v = narr[i]
- if v == quote and i > 0 and not is_escaping:
- in_quotes = ~in_quotes
- if v == backslash or is_escaping:
- is_escaping = ~is_escaping
- if v == comma: # commas that should be \n
- if num_open_brackets_seen == 0 and not in_quotes:
- narr[i] = newline
- elif v == left_bracket:
- if not in_quotes:
- num_open_brackets_seen += 1
- elif v == right_bracket:
- if not in_quotes:
- num_open_brackets_seen -= 1
-
- return narr.tostring().decode('utf-8')
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def write_csv_rows(list data, ndarray data_index,
- int nlevels, ndarray cols, object writer):
-
- cdef int N, j, i, ncols
- cdef list rows
- cdef object val
-
- # In crude testing, N>100 yields little marginal improvement
- N=100
-
- # pre-allocate rows
- ncols = len(cols)
- rows = [[None] * (nlevels + ncols) for x in range(N)]
-
- j = -1
- if nlevels == 1:
- for j in range(len(data_index)):
- row = rows[j % N]
- row[0] = data_index[j]
- for i in range(ncols):
- row[1 + i] = data[i][j]
-
- if j >= N - 1 and j % N == N - 1:
- writer.writerows(rows)
- elif nlevels > 1:
- for j in range(len(data_index)):
- row = rows[j % N]
- row[:nlevels] = list(data_index[j])
- for i in range(ncols):
- row[nlevels + i] = data[i][j]
-
- if j >= N - 1 and j % N == N - 1:
- writer.writerows(rows)
- else:
- for j in range(len(data_index)):
- row = rows[j % N]
- for i in range(ncols):
- row[i] = data[i][j]
-
- if j >= N - 1 and j % N == N - 1:
- writer.writerows(rows)
-
- if j >= 0 and (j < N - 1 or (j % N) != N - 1):
- writer.writerows(rows[:((j + 1) % N)])
-
-
# ------------------------------------------------------------------------------
# Groupby-related functions
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index efe61716d0831..89d2de6de213a 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -2225,3 +2225,37 @@ def _maybe_encode(values):
if values is None:
return []
return [x.encode('utf-8') if isinstance(x, unicode) else x for x in values]
+
+
+def sanitize_objects(ndarray[object] values, set na_values,
+ convert_empty=True):
+ """
+ Convert specified values, including the given set na_values and empty
+ strings if convert_empty is True, to np.nan.
+
+ Parameters
+ ----------
+ values : ndarray[object]
+ na_values : set
+ convert_empty : bool (default True)
+ """
+ cdef:
+ Py_ssize_t i, n
+ object val, onan
+ Py_ssize_t na_count = 0
+ dict memo = {}
+
+ n = len(values)
+ onan = np.nan
+
+ for i from 0 <= i < n:
+ val = values[i]
+ if (convert_empty and val == '') or (val in na_values):
+ values[i] = onan
+ na_count += 1
+ elif val in memo:
+ values[i] = memo[val]
+ else:
+ memo[val] = val
+
+ return na_count
diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx
index b29a2e519efcd..75bff34e4a391 100644
--- a/pandas/_libs/src/inference.pyx
+++ b/pandas/_libs/src/inference.pyx
@@ -6,7 +6,7 @@ from tslibs.nattype import NaT
from tslibs.conversion cimport convert_to_tsobject
from tslibs.timedeltas cimport convert_to_timedelta64
from tslibs.timezones cimport get_timezone, tz_compare
-from datetime import datetime, timedelta
+
iNaT = util.get_nat()
cdef bint PY2 = sys.version_info[0] == 2
@@ -1405,30 +1405,6 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0,
return objects
-def sanitize_objects(ndarray[object] values, set na_values,
- convert_empty=True):
- cdef:
- Py_ssize_t i, n
- object val, onan
- Py_ssize_t na_count = 0
- dict memo = {}
-
- n = len(values)
- onan = np.nan
-
- for i from 0 <= i < n:
- val = values[i]
- if (convert_empty and val == '') or (val in na_values):
- values[i] = onan
- na_count += 1
- elif val in memo:
- values[i] = memo[val]
- else:
- memo[val] = val
-
- return na_count
-
-
def maybe_convert_bool(ndarray[object] arr,
true_values=None, false_values=None):
cdef:
diff --git a/pandas/_libs/writers.pyx b/pandas/_libs/writers.pyx
new file mode 100644
index 0000000000000..6f07d04b3fad3
--- /dev/null
+++ b/pandas/_libs/writers.pyx
@@ -0,0 +1,174 @@
+# -*- coding: utf-8 -*-
+
+cimport cython
+from cython cimport Py_ssize_t
+
+from cpython cimport (PyString_Check, PyBytes_Check, PyUnicode_Check,
+ PyBytes_GET_SIZE, PyUnicode_GET_SIZE)
+
+try:
+ from cpython cimport PyString_GET_SIZE
+except ImportError:
+ from cpython cimport PyUnicode_GET_SIZE as PyString_GET_SIZE
+
+import numpy as np
+cimport numpy as cnp
+from numpy cimport ndarray, uint8_t
+cnp.import_array()
+
+cimport util
+
+
+ctypedef fused pandas_string:
+ str
+ unicode
+ bytes
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def write_csv_rows(list data, ndarray data_index,
+ int nlevels, ndarray cols, object writer):
+ """
+ Write the given data to the writer object, pre-allocating where possible
+ for performance improvements.
+
+ Parameters
+ ----------
+ data : list
+ data_index : ndarray
+ nlevels : int
+ cols : ndarray
+ writer : object
+ """
+ cdef int N, j, i, ncols
+ cdef list rows
+ cdef object val
+
+ # In crude testing, N>100 yields little marginal improvement
+ N = 100
+
+ # pre-allocate rows
+ ncols = len(cols)
+ rows = [[None] * (nlevels + ncols) for x in range(N)]
+
+ j = -1
+ if nlevels == 1:
+ for j in range(len(data_index)):
+ row = rows[j % N]
+ row[0] = data_index[j]
+ for i in range(ncols):
+ row[1 + i] = data[i][j]
+
+ if j >= N - 1 and j % N == N - 1:
+ writer.writerows(rows)
+ elif nlevels > 1:
+ for j in range(len(data_index)):
+ row = rows[j % N]
+ row[:nlevels] = list(data_index[j])
+ for i in range(ncols):
+ row[nlevels + i] = data[i][j]
+
+ if j >= N - 1 and j % N == N - 1:
+ writer.writerows(rows)
+ else:
+ for j in range(len(data_index)):
+ row = rows[j % N]
+ for i in range(ncols):
+ row[i] = data[i][j]
+
+ if j >= N - 1 and j % N == N - 1:
+ writer.writerows(rows)
+
+ if j >= 0 and (j < N - 1 or (j % N) != N - 1):
+ writer.writerows(rows[:((j + 1) % N)])
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def convert_json_to_lines(object arr):
+ """
+ replace comma separated json with line feeds, paying special attention
+ to quotes & brackets
+ """
+ cdef:
+ Py_ssize_t i = 0, num_open_brackets_seen = 0, length
+ bint in_quotes = 0, is_escaping = 0
+ ndarray[uint8_t] narr
+ unsigned char v, comma, left_bracket, right_brack, newline
+
+ newline = ord('\n')
+ comma = ord(',')
+ left_bracket = ord('{')
+ right_bracket = ord('}')
+ quote = ord('"')
+ backslash = ord('\\')
+
+ narr = np.frombuffer(arr.encode('utf-8'), dtype='u1').copy()
+ length = narr.shape[0]
+ for i in range(length):
+ v = narr[i]
+ if v == quote and i > 0 and not is_escaping:
+ in_quotes = ~in_quotes
+ if v == backslash or is_escaping:
+ is_escaping = ~is_escaping
+ if v == comma: # commas that should be \n
+ if num_open_brackets_seen == 0 and not in_quotes:
+ narr[i] = newline
+ elif v == left_bracket:
+ if not in_quotes:
+ num_open_brackets_seen += 1
+ elif v == right_bracket:
+ if not in_quotes:
+ num_open_brackets_seen -= 1
+
+ return narr.tostring().decode('utf-8')
+
+
+# stata, pytables
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cpdef Py_ssize_t max_len_string_array(pandas_string[:] arr):
+ """ return the maximum size of elements in a 1-dim string array """
+ cdef:
+ Py_ssize_t i, m = 0, l = 0, length = arr.shape[0]
+ pandas_string v
+
+ for i in range(length):
+ v = arr[i]
+ if PyString_Check(v):
+ l = PyString_GET_SIZE(v)
+ elif PyBytes_Check(v):
+ l = PyBytes_GET_SIZE(v)
+ elif PyUnicode_Check(v):
+ l = PyUnicode_GET_SIZE(v)
+
+ if l > m:
+ m = l
+
+ return m
+
+
+# ------------------------------------------------------------------
+# PyTables Helpers
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def string_array_replace_from_nan_rep(
+ ndarray[object, ndim=1] arr, object nan_rep,
+ object replace=None):
+ """
+ Replace the values in the array with 'replacement' if
+ they are 'nan_rep'. Return the same array.
+ """
+
+ cdef int length = arr.shape[0], i = 0
+ if replace is None:
+ replace = np.nan
+
+ for i from 0 <= i < length:
+ if arr[i] == nan_rep:
+ arr[i] = replace
+
+ return arr
diff --git a/pandas/core/computation/scope.py b/pandas/core/computation/scope.py
index 6a298f5137eb1..c3128be0f5599 100644
--- a/pandas/core/computation/scope.py
+++ b/pandas/core/computation/scope.py
@@ -48,7 +48,7 @@ def _raw_hex_id(obj):
_DEFAULT_GLOBALS = {
- 'Timestamp': pandas._libs.lib.Timestamp,
+ 'Timestamp': pandas._libs.tslib.Timestamp,
'datetime': datetime.datetime,
'True': True,
'False': False,
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 5155662d2f97d..b2816343fc8eb 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -282,7 +282,7 @@ def maybe_promote(dtype, fill_value=np.nan):
fill_value = iNaT
elif issubclass(dtype.type, np.timedelta64):
try:
- fill_value = lib.Timedelta(fill_value).value
+ fill_value = tslib.Timedelta(fill_value).value
except Exception:
# as for datetimes, cannot upcast to object
fill_value = iNaT
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 6e777281b11e1..aaa4ae4773108 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10,7 +10,7 @@
import numpy as np
import pandas as pd
-from pandas._libs import tslib, lib, properties
+from pandas._libs import tslib, properties
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
@@ -7216,9 +7216,9 @@ def describe_categorical_1d(data):
if is_datetime64_dtype(data):
asint = data.dropna().values.view('i8')
names += ['top', 'freq', 'first', 'last']
- result += [lib.Timestamp(top), freq,
- lib.Timestamp(asint.min()),
- lib.Timestamp(asint.max())]
+ result += [tslib.Timestamp(top), freq,
+ tslib.Timestamp(asint.min()),
+ tslib.Timestamp(asint.max())]
else:
names += ['top', 'freq']
result += [top, freq]
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index f3e5e4c99a899..22d38d3df071e 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -2656,7 +2656,7 @@ def _try_coerce_args(self, values, other):
other = other.asi8
other_mask = isna(other)
elif isinstance(other, (np.datetime64, datetime, date)):
- other = lib.Timestamp(other)
+ other = tslib.Timestamp(other)
tz = getattr(other, 'tz', None)
# test we can have an equal time zone
@@ -2675,7 +2675,7 @@ def _try_coerce_result(self, result):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('M8[ns]')
elif isinstance(result, (np.integer, np.float, np.datetime64)):
- result = lib.Timestamp(result, tz=self.values.tz)
+ result = tslib.Timestamp(result, tz=self.values.tz)
if isinstance(result, np.ndarray):
# allow passing of > 1dim if its trivial
if result.ndim > 1:
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index eda86f12d501d..d4851f579dda4 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -268,7 +268,7 @@ def _wrap_results(result, dtype):
if is_datetime64_dtype(dtype):
if not isinstance(result, np.ndarray):
- result = lib.Timestamp(result)
+ result = tslib.Timestamp(result)
else:
result = result.view(dtype)
elif is_timedelta64_dtype(dtype):
@@ -278,7 +278,7 @@ def _wrap_results(result, dtype):
if np.fabs(result) > _int64_max:
raise ValueError("overflow in timedelta operation")
- result = lib.Timedelta(result, unit='ns')
+ result = tslib.Timedelta(result, unit='ns')
else:
result = result.astype('i8').view(dtype)
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 706bec9e44892..961c8c004e9e3 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -24,7 +24,7 @@
from pandas.compat.numpy import function as nv
from pandas._libs import lib, tslib
-from pandas._libs.lib import Timestamp
+from pandas._libs.tslib import Timestamp
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.util._decorators import Appender, Substitution
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index bca0b64cb53fe..269c81b380b5e 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -38,7 +38,7 @@
_stringify_path)
from pandas.io.formats.printing import adjoin, justify, pprint_thing
from pandas.io.formats.common import get_level_lengths
-from pandas._libs import lib
+from pandas._libs import lib, writers as libwriters
from pandas._libs.tslib import (iNaT, Timestamp, Timedelta,
format_array_from_datetime)
from pandas.core.indexes.datetimes import DatetimeIndex
@@ -1789,7 +1789,8 @@ def _save_chunk(self, start_i, end_i):
date_format=self.date_format,
quoting=self.quoting)
- lib.write_csv_rows(self.data, ix, self.nlevels, self.cols, self.writer)
+ libwriters.write_csv_rows(self.data, ix, self.nlevels,
+ self.cols, self.writer)
# ----------------------------------------------------------------------
diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py
index 595031b04e367..c7901f4352d00 100644
--- a/pandas/io/json/normalize.py
+++ b/pandas/io/json/normalize.py
@@ -5,7 +5,7 @@
from collections import defaultdict
import numpy as np
-from pandas._libs.lib import convert_json_to_lines
+from pandas._libs.writers import convert_json_to_lines
from pandas import compat, DataFrame
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 5135bb01fb378..af1441f4a0fc9 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1596,11 +1596,12 @@ def _infer_types(self, values, na_values, try_num_bool=True):
except Exception:
result = values
if values.dtype == np.object_:
- na_count = lib.sanitize_objects(result, na_values, False)
+ na_count = parsers.sanitize_objects(result, na_values,
+ False)
else:
result = values
if values.dtype == np.object_:
- na_count = lib.sanitize_objects(values, na_values, False)
+ na_count = parsers.sanitize_objects(values, na_values, False)
if result.dtype == np.object_ and try_num_bool:
result = lib.maybe_convert_bool(values,
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 5376473f83f22..0d833807602e1 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -47,7 +47,7 @@
from pandas.core.config import get_option
from pandas.core.computation.pytables import Expr, maybe_expression
-from pandas._libs import algos, lib
+from pandas._libs import algos, lib, writers as libwriters
from pandas._libs.tslibs import timezones
from distutils.version import LooseVersion
@@ -3843,7 +3843,7 @@ def read(self, where=None, columns=None, **kwargs):
# need a better algorithm
tuple_index = long_index.values
- unique_tuples = lib.fast_unique(tuple_index)
+ unique_tuples = unique(tuple_index)
unique_tuples = com._asarray_tuplesafe(unique_tuples)
indexer = match(unique_tuples, tuple_index)
@@ -4561,7 +4561,8 @@ def _convert_string_array(data, encoding, itemsize=None):
# create the sized dtype
if itemsize is None:
- itemsize = lib.max_len_string_array(_ensure_object(data.ravel()))
+ ensured = _ensure_object(data.ravel())
+ itemsize = libwriters.max_len_string_array(ensured)
data = np.asarray(data, dtype="S%d" % itemsize)
return data
@@ -4590,7 +4591,7 @@ def _unconvert_string_array(data, nan_rep=None, encoding=None):
encoding = _ensure_encoding(encoding)
if encoding is not None and len(data):
- itemsize = lib.max_len_string_array(_ensure_object(data))
+ itemsize = libwriters.max_len_string_array(_ensure_object(data))
if compat.PY3:
dtype = "U{0}".format(itemsize)
else:
@@ -4604,7 +4605,7 @@ def _unconvert_string_array(data, nan_rep=None, encoding=None):
if nan_rep is None:
nan_rep = 'nan'
- data = lib.string_array_replace_from_nan_rep(data, nan_rep)
+ data = libwriters.string_array_replace_from_nan_rep(data, nan_rep)
return data.reshape(shape)
@@ -4621,7 +4622,7 @@ def _get_converter(kind, encoding):
if kind == 'datetime64':
return lambda x: np.asarray(x, dtype='M8[ns]')
elif kind == 'datetime':
- return lib.convert_timestamps
+ return lambda x: to_datetime(x, cache=True).to_pydatetime()
elif kind == 'string':
return lambda x: _unconvert_string_array(x, encoding=encoding)
else: # pragma: no cover
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index b409cf20e9a09..16665e19985f1 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -16,8 +16,9 @@
import numpy as np
from dateutil.relativedelta import relativedelta
-from pandas._libs.lib import max_len_string_array, infer_dtype
+from pandas._libs.lib import infer_dtype
from pandas._libs.tslib import NaT, Timestamp
+from pandas._libs.writers import max_len_string_array
import pandas as pd
from pandas import compat, to_timedelta, to_datetime, isna, DatetimeIndex
diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py
index 66ee7fa98491f..07163615c6ba4 100644
--- a/pandas/plotting/_converter.py
+++ b/pandas/plotting/_converter.py
@@ -23,7 +23,7 @@
from pandas.compat import lrange
import pandas.compat as compat
-import pandas._libs.lib as lib
+from pandas._libs import tslib
import pandas.core.common as com
from pandas.core.index import Index
@@ -52,7 +52,7 @@
def get_pairs():
pairs = [
- (lib.Timestamp, DatetimeConverter),
+ (tslib.Timestamp, DatetimeConverter),
(Period, PeriodConverter),
(pydt.datetime, DatetimeConverter),
(pydt.date, DatetimeConverter),
@@ -312,7 +312,7 @@ def try_parse(values):
if isinstance(values, (datetime, pydt.date)):
return _dt_to_float_ordinal(values)
elif isinstance(values, np.datetime64):
- return _dt_to_float_ordinal(lib.Timestamp(values))
+ return _dt_to_float_ordinal(tslib.Timestamp(values))
elif isinstance(values, pydt.time):
return dates.date2num(values)
elif (is_integer(values) or is_float(values)):
diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py
index b59dd25ead57f..197a42bdaacbb 100644
--- a/pandas/tests/indexes/datetimes/test_construction.py
+++ b/pandas/tests/indexes/datetimes/test_construction.py
@@ -7,7 +7,6 @@
import pandas as pd
from pandas import offsets
import pandas.util.testing as tm
-from pandas._libs import lib
from pandas._libs.tslib import OutOfBoundsDatetime
from pandas._libs.tslibs import conversion
from pandas import (DatetimeIndex, Index, Timestamp, datetime, date_range,
@@ -537,7 +536,7 @@ def test_datetimeindex_constructor_misc(self):
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
- arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
+ arr = [Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 508c3a73f48c7..974099f1fbbe9 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -29,7 +29,7 @@
from pandas.core.indexes.datetimes import _to_m8
import pandas as pd
-from pandas._libs.lib import Timestamp
+from pandas._libs.tslib import Timestamp
class TestIndex(Base):
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index aedc957ec67da..e59456b8a2d5e 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -19,7 +19,7 @@
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
-from pandas._libs.lib import Timestamp
+from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index 3de1c4c982654..0c1bec7a6f1a9 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -13,7 +13,7 @@
import pandas.util.testing as tm
import pandas as pd
-from pandas._libs.lib import Timestamp
+from pandas._libs.tslib import Timestamp
from pandas.tests.indexes.common import Base
diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py
index cd1685f282bd2..e949772981eb7 100644
--- a/pandas/tests/io/json/test_ujson.py
+++ b/pandas/tests/io/json/test_ujson.py
@@ -425,7 +425,7 @@ def test_npy_nat(self):
assert ujson.encode(input) == 'null', "Expected null"
def test_datetime_units(self):
- from pandas._libs.lib import Timestamp
+ from pandas._libs.tslib import Timestamp
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
stamp = Timestamp(val)
diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py
index 8525cb42c2455..bc972076c6a80 100644
--- a/pandas/tests/io/parser/common.py
+++ b/pandas/tests/io/parser/common.py
@@ -11,7 +11,7 @@
import pytest
import numpy as np
-from pandas._libs.lib import Timestamp
+from pandas._libs.tslib import Timestamp
import pandas as pd
import pandas.util.testing as tm
diff --git a/pandas/tests/io/parser/converters.py b/pandas/tests/io/parser/converters.py
index 1176b1e84e29b..ae35d45591dc5 100644
--- a/pandas/tests/io/parser/converters.py
+++ b/pandas/tests/io/parser/converters.py
@@ -13,7 +13,7 @@
import pandas as pd
import pandas.util.testing as tm
-from pandas._libs.lib import Timestamp
+from pandas._libs.tslib import Timestamp
from pandas import DataFrame, Index
from pandas.compat import parse_date, StringIO, lmap
diff --git a/pandas/tests/io/parser/parse_dates.py b/pandas/tests/io/parser/parse_dates.py
index b7d0dd1a3484f..919b357f14236 100644
--- a/pandas/tests/io/parser/parse_dates.py
+++ b/pandas/tests/io/parser/parse_dates.py
@@ -11,7 +11,7 @@
import pytest
import numpy as np
from pandas._libs.tslibs import parsing
-from pandas._libs.lib import Timestamp
+from pandas._libs.tslib import Timestamp
import pandas as pd
import pandas.io.parsers as parsers
diff --git a/pandas/tests/io/parser/test_parsers.py b/pandas/tests/io/parser/test_parsers.py
index ec240531925e3..7717102b64fc5 100644
--- a/pandas/tests/io/parser/test_parsers.py
+++ b/pandas/tests/io/parser/test_parsers.py
@@ -5,7 +5,7 @@
from pandas import read_csv, read_table, DataFrame
import pandas.core.common as com
-from pandas._libs.lib import Timestamp
+from pandas._libs.tslib import Timestamp
from pandas.compat import StringIO
from .common import ParserTests
diff --git a/pandas/tests/io/parser/usecols.py b/pandas/tests/io/parser/usecols.py
index 8767055239cd5..195fb4cba2aed 100644
--- a/pandas/tests/io/parser/usecols.py
+++ b/pandas/tests/io/parser/usecols.py
@@ -11,7 +11,7 @@
import pandas.util.testing as tm
from pandas import DataFrame, Index
-from pandas._libs.lib import Timestamp
+from pandas._libs.tslib import Timestamp
from pandas.compat import StringIO
diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py
index fbfbad547ce1b..e5c3d6f7d3ee1 100644
--- a/pandas/tests/series/test_indexing.py
+++ b/pandas/tests/series/test_indexing.py
@@ -17,7 +17,7 @@
Categorical)
from pandas.core.indexing import IndexingError
from pandas.tseries.offsets import BDay
-from pandas._libs import tslib, lib
+from pandas._libs import tslib
from pandas.compat import lrange, range
from pandas import compat
@@ -2707,7 +2707,7 @@ def test_fancy_getitem(self):
assert s['1/2/2009'] == 48
assert s['2009-1-2'] == 48
assert s[datetime(2009, 1, 2)] == 48
- assert s[lib.Timestamp(datetime(2009, 1, 2))] == 48
+ assert s[Timestamp(datetime(2009, 1, 2))] == 48
pytest.raises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py
index 10061204df42a..502f0c3bced61 100644
--- a/pandas/tests/test_lib.py
+++ b/pandas/tests/test_lib.py
@@ -3,7 +3,7 @@
import pytest
import numpy as np
-from pandas._libs import lib
+from pandas._libs import lib, writers as libwriters
import pandas.util.testing as tm
@@ -12,19 +12,19 @@ class TestMisc(object):
def test_max_len_string_array(self):
arr = a = np.array(['foo', 'b', np.nan], dtype='object')
- assert lib.max_len_string_array(arr) == 3
+ assert libwriters.max_len_string_array(arr) == 3
# unicode
arr = a.astype('U').astype(object)
- assert lib.max_len_string_array(arr) == 3
+ assert libwriters.max_len_string_array(arr) == 3
# bytes for python3
arr = a.astype('S').astype(object)
- assert lib.max_len_string_array(arr) == 3
+ assert libwriters.max_len_string_array(arr) == 3
# raises
pytest.raises(TypeError,
- lambda: lib.max_len_string_array(arr.astype('U')))
+ lambda: libwriters.max_len_string_array(arr.astype('U')))
def test_fast_unique_multiple_list_gen_sort(self):
keys = [['p', 'a'], ['n', 'd'], ['a', 's']]
diff --git a/setup.py b/setup.py
index 721e6f62bd3e4..4d42379eef11b 100755
--- a/setup.py
+++ b/setup.py
@@ -328,6 +328,7 @@ class CheckSDist(sdist_class):
'pandas/_libs/tslibs/frequencies.pyx',
'pandas/_libs/tslibs/resolution.pyx',
'pandas/_libs/tslibs/parsing.pyx',
+ 'pandas/_libs/writers.pyx',
'pandas/io/sas/sas.pyx']
def initialize_options(self):
@@ -616,6 +617,9 @@ def pxd(name):
'_libs.window': {
'pyxfile': '_libs/window',
'pxdfiles': ['_libs/skiplist', '_libs/src/util']},
+ '_libs.writers': {
+ 'pyxfile': '_libs/writers',
+ 'pxdfiles': ['_libs/src/util']},
'io.sas._sas': {
'pyxfile': 'io/sas/sas'}}
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19413 | 2018-01-26T17:49:43Z | 2018-02-01T11:33:01Z | 2018-02-01T11:33:01Z | 2018-02-11T21:57:03Z |
[#7292] BUG: asfreq / pct_change strange behavior | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 4dde76dee46a5..b5b52c7b9c89b 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -419,6 +419,7 @@ Datetimelike
- Bug in ``.astype()`` to non-ns timedelta units would hold the incorrect dtype (:issue:`19176`, :issue:`19223`, :issue:`12425`)
- Bug in subtracting :class:`Series` from ``NaT`` incorrectly returning ``NaT`` (:issue:`19158`)
- Bug in :func:`Series.truncate` which raises ``TypeError`` with a monotonic ``PeriodIndex`` (:issue:`17717`)
+- Bug in :func:`~DataFrame.pct_change` using ``periods`` and ``freq`` returned different length outputs (:issue:`7292`)
Timezones
^^^^^^^^^
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 6e777281b11e1..bee954aa9bba8 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -7315,6 +7315,7 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
rs = (data.div(data.shift(periods=periods, freq=freq, axis=axis,
**kwargs)) - 1)
+ rs = rs.reindex_like(data)
if freq is None:
mask = isna(com._values_from_object(self))
np.putmask(rs.values, mask, np.nan)
diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py
index 3af798acdede5..e6b47fd69cb05 100644
--- a/pandas/tests/frame/test_timeseries.py
+++ b/pandas/tests/frame/test_timeseries.py
@@ -108,7 +108,9 @@ def test_pct_change(self):
rs = self.tsframe.pct_change(freq='5D')
filled = self.tsframe.fillna(method='pad')
- assert_frame_equal(rs, filled / filled.shift(freq='5D') - 1)
+ assert_frame_equal(rs,
+ (filled / filled.shift(freq='5D') - 1)
+ .reindex_like(filled))
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
@@ -120,6 +122,38 @@ def test_pct_change_shift_over_nas(self):
edf = DataFrame({'a': expected, 'b': expected})
assert_frame_equal(chg, edf)
+ def test_pct_change_periods_freq(self):
+ # GH 7292
+ rs_freq = self.tsframe.pct_change(freq='5B')
+ rs_periods = self.tsframe.pct_change(5)
+ assert_frame_equal(rs_freq, rs_periods)
+
+ rs_freq = self.tsframe.pct_change(freq='3B', fill_method=None)
+ rs_periods = self.tsframe.pct_change(3, fill_method=None)
+ assert_frame_equal(rs_freq, rs_periods)
+
+ rs_freq = self.tsframe.pct_change(freq='3B', fill_method='bfill')
+ rs_periods = self.tsframe.pct_change(3, fill_method='bfill')
+ assert_frame_equal(rs_freq, rs_periods)
+
+ rs_freq = self.tsframe.pct_change(freq='7B',
+ fill_method='pad',
+ limit=1)
+ rs_periods = self.tsframe.pct_change(7, fill_method='pad', limit=1)
+ assert_frame_equal(rs_freq, rs_periods)
+
+ rs_freq = self.tsframe.pct_change(freq='7B',
+ fill_method='bfill',
+ limit=3)
+ rs_periods = self.tsframe.pct_change(7, fill_method='bfill', limit=3)
+ assert_frame_equal(rs_freq, rs_periods)
+
+ empty_ts = DataFrame(index=self.tsframe.index,
+ columns=self.tsframe.columns)
+ rs_freq = empty_ts.pct_change(freq='14B')
+ rs_periods = empty_ts.pct_change(14)
+ assert_frame_equal(rs_freq, rs_periods)
+
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
dates = np.asarray(rng)
diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py
index 7be801629e387..7a1aff1cc223c 100644
--- a/pandas/tests/series/test_timeseries.py
+++ b/pandas/tests/series/test_timeseries.py
@@ -344,7 +344,9 @@ def test_pct_change(self):
rs = self.ts.pct_change(freq='5D')
filled = self.ts.fillna(method='pad')
- assert_series_equal(rs, filled / filled.shift(freq='5D') - 1)
+ assert_series_equal(rs,
+ (filled / filled.shift(freq='5D') - 1)
+ .reindex_like(filled))
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
@@ -353,6 +355,33 @@ def test_pct_change_shift_over_nas(self):
expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])
assert_series_equal(chg, expected)
+ def test_pct_change_periods_freq(self):
+ # GH 7292
+ rs_freq = self.ts.pct_change(freq='5B')
+ rs_periods = self.ts.pct_change(5)
+ assert_series_equal(rs_freq, rs_periods)
+
+ rs_freq = self.ts.pct_change(freq='3B', fill_method=None)
+ rs_periods = self.ts.pct_change(3, fill_method=None)
+ assert_series_equal(rs_freq, rs_periods)
+
+ rs_freq = self.ts.pct_change(freq='3B', fill_method='bfill')
+ rs_periods = self.ts.pct_change(3, fill_method='bfill')
+ assert_series_equal(rs_freq, rs_periods)
+
+ rs_freq = self.ts.pct_change(freq='7B', fill_method='pad', limit=1)
+ rs_periods = self.ts.pct_change(7, fill_method='pad', limit=1)
+ assert_series_equal(rs_freq, rs_periods)
+
+ rs_freq = self.ts.pct_change(freq='7B', fill_method='bfill', limit=3)
+ rs_periods = self.ts.pct_change(7, fill_method='bfill', limit=3)
+ assert_series_equal(rs_freq, rs_periods)
+
+ empty_ts = Series(index=self.ts.index)
+ rs_freq = empty_ts.pct_change(freq='14B')
+ rs_periods = empty_ts.pct_change(14)
+ assert_series_equal(rs_freq, rs_periods)
+
def test_autocorr(self):
# Just run the function
corr1 = self.ts.autocorr()
| - [x] closes https://github.com/pandas-dev/pandas/issues/7292
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19410 | 2018-01-26T12:46:18Z | 2018-01-31T11:34:13Z | 2018-01-31T11:34:12Z | 2018-01-31T11:37:19Z |
Pin Thrift to 0.10.* | diff --git a/ci/requirements-3.6_WIN.run b/ci/requirements-3.6_WIN.run
index db2d429a2a4ff..3042888763863 100644
--- a/ci/requirements-3.6_WIN.run
+++ b/ci/requirements-3.6_WIN.run
@@ -12,5 +12,6 @@ numexpr
pytables
matplotlib
blosc
+thrift=0.10*
fastparquet
pyarrow
| fastparquet compatibility in https://github.com/dask/fastparquet/pull/281, which will be released before long. But let's pin for now. | https://api.github.com/repos/pandas-dev/pandas/pulls/19408 | 2018-01-26T08:16:35Z | 2018-01-26T10:11:58Z | 2018-01-26T10:11:58Z | 2018-02-16T11:54:28Z |
DOC: catch warnings in test_feather & other | diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 4508d5c1e1781..6e1b6e14861c3 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -215,7 +215,10 @@ def read(self, path, columns=None, **kwargs):
# We need to retain the original path(str) while also
# pass the S3File().open function to fsatparquet impl.
s3, _, _ = get_filepath_or_buffer(path)
- parquet_file = self.api.ParquetFile(path, open_with=s3.s3.open)
+ try:
+ parquet_file = self.api.ParquetFile(path, open_with=s3.s3.open)
+ finally:
+ s3.close()
else:
path, _, _ = get_filepath_or_buffer(path)
parquet_file = self.api.ParquetFile(path)
diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index e9909400ce429..9d04111d64125 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -1,5 +1,6 @@
""" test feather-format compat """
from distutils.version import LooseVersion
+from warnings import catch_warnings
import numpy as np
@@ -31,7 +32,9 @@ def check_round_trip(self, df, **kwargs):
with ensure_clean() as path:
to_feather(df, path)
- result = read_feather(path, **kwargs)
+
+ with catch_warnings(record=True):
+ result = read_feather(path, **kwargs)
assert_frame_equal(result, df)
def test_error(self):
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 8a6a22abe23fa..6c172c80514e7 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -148,7 +148,8 @@ def check_round_trip(df, engine=None, path=None,
def compare(repeat):
for _ in range(repeat):
df.to_parquet(path, **write_kwargs)
- actual = read_parquet(path, **read_kwargs)
+ with catch_warnings(record=True):
+ actual = read_parquet(path, **read_kwargs)
tm.assert_frame_equal(expected, actual,
check_names=check_names)
@@ -228,35 +229,20 @@ def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
with tm.ensure_clean() as path:
df.to_parquet(path, engine=fp, compression=None)
- result = read_parquet(path, engine=pa)
- tm.assert_frame_equal(result, df)
-
- result = read_parquet(path, engine=pa, columns=['a', 'd'])
- tm.assert_frame_equal(result, df[['a', 'd']])
-
-
-def check_round_trip_equals(df, path, engine,
- write_kwargs, read_kwargs,
- expected, check_names):
-
- df.to_parquet(path, engine, **write_kwargs)
- actual = read_parquet(path, engine, **read_kwargs)
- tm.assert_frame_equal(expected, actual,
- check_names=check_names)
+ with catch_warnings(record=True):
+ result = read_parquet(path, engine=pa)
+ tm.assert_frame_equal(result, df)
- # repeat
- df.to_parquet(path, engine, **write_kwargs)
- actual = read_parquet(path, engine, **read_kwargs)
- tm.assert_frame_equal(expected, actual,
- check_names=check_names)
+ result = read_parquet(path, engine=pa, columns=['a', 'd'])
+ tm.assert_frame_equal(result, df[['a', 'd']])
class Base(object):
def check_error_on_write(self, df, engine, exc):
# check that we are raising the exception on writing
- with pytest.raises(exc):
- with tm.ensure_clean() as path:
+ with tm.ensure_clean() as path:
+ with pytest.raises(exc):
to_parquet(df, path, engine, compression=None)
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 34e634f56aec6..941bdcbc8b064 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -205,6 +205,7 @@ def decompress_file(path, compression):
raise ValueError(msg)
yield f
+ f.close()
def assert_almost_equal(left, right, check_exact=False,
| https://api.github.com/repos/pandas-dev/pandas/pulls/19407 | 2018-01-26T03:50:33Z | 2018-01-26T11:54:08Z | 2018-01-26T11:54:08Z | 2018-01-26T12:44:46Z | |
standardize cimports of numpy as "cnp" | diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 9a7af71e74574..5d17488963b1c 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -1,20 +1,14 @@
# cython: profile=False
-cimport numpy as np
-import numpy as np
-
cimport cython
from cython cimport Py_ssize_t
-np.import_array()
-
-cdef float64_t FP_ERR = 1e-13
-
-cimport util
-
from libc.stdlib cimport malloc, free
from libc.string cimport memmove
+from libc.math cimport fabs, sqrt
+import numpy as np
+cimport numpy as cnp
from numpy cimport (ndarray,
NPY_INT64, NPY_UINT64, NPY_INT32, NPY_INT16, NPY_INT8,
NPY_FLOAT32, NPY_FLOAT64,
@@ -22,18 +16,19 @@ from numpy cimport (ndarray,
int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t,
uint32_t, uint64_t, float32_t, float64_t,
double_t)
+cnp.import_array()
-cdef double NaN = <double> np.NaN
-cdef double nan = NaN
-
-from libc.math cimport fabs, sqrt
-
-# this is our util.pxd
+cimport util
from util cimport numeric, get_nat
import missing
+cdef float64_t FP_ERR = 1e-13
+
+cdef double NaN = <double> np.NaN
+cdef double nan = NaN
+
cdef int64_t iNaT = get_nat()
cdef:
diff --git a/pandas/_libs/algos_rank_helper.pxi.in b/pandas/_libs/algos_rank_helper.pxi.in
index 8ccc6e036da80..2f40bd4349a2e 100644
--- a/pandas/_libs/algos_rank_helper.pxi.in
+++ b/pandas/_libs/algos_rank_helper.pxi.in
@@ -50,7 +50,7 @@ def rank_1d_{{dtype}}(object in_arr, ties_method='average', ascending=True,
ndarray[float64_t] ranks
ndarray[int64_t] argsorted
- ndarray[np.uint8_t, cast=True] sorted_mask
+ ndarray[uint8_t, cast=True] sorted_mask
{{if dtype == 'uint64'}}
{{ctype}} val
diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx
index 72c2834b0bd57..07b4b80603e03 100644
--- a/pandas/_libs/hashtable.pyx
+++ b/pandas/_libs/hashtable.pyx
@@ -1,6 +1,22 @@
# cython: profile=False
-from cpython cimport PyObject, Py_INCREF, PyList_Check, PyTuple_Check
+cimport cython
+
+from cpython cimport (PyObject, Py_INCREF, PyList_Check, PyTuple_Check,
+ PyMem_Malloc, PyMem_Realloc, PyMem_Free,
+ PyString_Check, PyBytes_Check,
+ PyUnicode_Check)
+
+from libc.stdlib cimport malloc, free
+
+import numpy as np
+cimport numpy as cnp
+from numpy cimport ndarray, uint8_t, uint32_t
+cnp.import_array()
+
+cdef extern from "numpy/npy_math.h":
+ double NAN "NPY_NAN"
+
from khash cimport (
khiter_t,
@@ -23,29 +39,13 @@ from khash cimport (
kh_put_pymap, kh_resize_pymap)
-from numpy cimport ndarray, uint8_t, uint32_t
-
-from libc.stdlib cimport malloc, free
-from cpython cimport (PyMem_Malloc, PyMem_Realloc, PyMem_Free,
- PyString_Check, PyBytes_Check,
- PyUnicode_Check)
-
from util cimport _checknan
cimport util
-import numpy as np
-nan = np.nan
-
-cdef extern from "numpy/npy_math.h":
- double NAN "NPY_NAN"
-
-cimport cython
-cimport numpy as cnp
-
from missing cimport checknull
-cnp.import_array()
-cnp.import_ufunc()
+
+nan = np.nan
cdef int64_t iNaT = util.get_nat()
_SIZE_HINT_LIMIT = (1 << 20) + 7
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 15aef867ba413..996ece063b980 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -1,17 +1,19 @@
# cython: profile=False
+from datetime import datetime, timedelta, date
-from numpy cimport (ndarray, float64_t, int32_t, int64_t, uint8_t, uint64_t,
- NPY_DATETIME, NPY_TIMEDELTA)
cimport cython
-cimport numpy as cnp
+from cpython cimport PyTuple_Check, PyList_Check
+from cpython.slice cimport PySlice_Check
+import numpy as np
+cimport numpy as cnp
+from numpy cimport (ndarray, float64_t, int32_t, int64_t, uint8_t, uint64_t,
+ NPY_DATETIME, NPY_TIMEDELTA)
cnp.import_array()
-cnp.import_ufunc()
-cimport util
-import numpy as np
+cimport util
from tslibs.conversion cimport maybe_datetimelike_to_i8
@@ -20,10 +22,6 @@ from hashtable cimport HashTable
from pandas._libs import algos, hashtable as _hash
from pandas._libs.tslibs import period as periodlib
from pandas._libs.tslib import Timestamp, Timedelta
-from datetime import datetime, timedelta, date
-
-from cpython cimport PyTuple_Check, PyList_Check
-from cpython.slice cimport PySlice_Check
cdef int64_t iNaT = util.get_nat()
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index a5abe324254ce..b46a05a0842c3 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -10,7 +10,6 @@ cdef extern from "Python.h":
Py_ssize_t PY_SSIZE_T_MAX
import numpy as np
-cimport numpy as np
from numpy cimport int64_t
cdef extern from "compat_helper.h":
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index 0718f8bd2b970..c0b2ca66e30a6 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -1,4 +1,4 @@
-cimport numpy as np
+cimport numpy as cnp
import numpy as np
cimport util
diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx
index 344c5d25d0c3d..27d2a639d13e6 100644
--- a/pandas/_libs/join.pyx
+++ b/pandas/_libs/join.pyx
@@ -1,16 +1,15 @@
# cython: profile=False
-cimport numpy as np
-import numpy as np
-
cimport cython
from cython cimport Py_ssize_t
-np.import_array()
-
+import numpy as np
+cimport numpy as cnp
from numpy cimport (ndarray,
int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t,
uint32_t, uint64_t, float32_t, float64_t)
+cnp.import_array()
+
cdef double NaN = <double> np.NaN
cdef double nan = NaN
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index db0ff2931d96f..c3a654b01022c 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -5,7 +5,7 @@ cimport cython
from cython cimport Py_ssize_t
import numpy as np
-cimport numpy as np
+cimport numpy as cnp
from numpy cimport (ndarray, PyArray_NDIM, PyArray_GETITEM,
PyArray_ITER_DATA, PyArray_ITER_NEXT, PyArray_IterNew,
flatiter, NPY_OBJECT,
@@ -13,9 +13,7 @@ from numpy cimport (ndarray, PyArray_NDIM, PyArray_GETITEM,
float32_t, float64_t,
uint8_t, uint64_t,
complex128_t)
-# initialize numpy
-np.import_array()
-np.import_ufunc()
+cnp.import_array()
from cpython cimport (Py_INCREF, PyTuple_SET_ITEM,
PyList_Check, PyFloat_Check,
@@ -95,7 +93,7 @@ cpdef bint is_scalar(object val):
"""
- return (np.PyArray_IsAnyScalar(val)
+ return (cnp.PyArray_IsAnyScalar(val)
# As of numpy-1.9, PyArray_IsAnyScalar misses bytearrays on Py3.
or PyBytes_Check(val)
# We differ from numpy (as of 1.10), which claims that None is
@@ -710,7 +708,7 @@ def clean_index_list(list obj):
for i in range(n):
v = obj[i]
- if not (PyList_Check(v) or np.PyArray_Check(v) or hasattr(v, '_data')):
+ if not (PyList_Check(v) or util.is_array(v) or hasattr(v, '_data')):
all_arrays = 0
break
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index 0b60fc2c5b4d1..dfd044131afb4 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -7,9 +7,9 @@ cimport cython
from cython cimport Py_ssize_t
import numpy as np
-cimport numpy as np
+cimport numpy as cnp
from numpy cimport ndarray, int64_t, uint8_t
-np.import_array()
+cnp.import_array()
cimport util
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index 4ca87a777e497..3588ac14c87d1 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -8,13 +8,13 @@ from cpython cimport Py_INCREF
from libc.stdlib cimport malloc, free
import numpy as np
-cimport numpy as np
+cimport numpy as cnp
from numpy cimport (ndarray,
int64_t,
PyArray_SETITEM,
PyArray_ITER_NEXT, PyArray_ITER_DATA, PyArray_IterNew,
flatiter)
-np.import_array()
+cnp.import_array()
cimport util
from lib import maybe_convert_objects
diff --git a/pandas/_libs/reshape.pyx b/pandas/_libs/reshape.pyx
index c4104b66e009f..1d7893f69c31d 100644
--- a/pandas/_libs/reshape.pyx
+++ b/pandas/_libs/reshape.pyx
@@ -1,16 +1,15 @@
# cython: profile=False
-cimport numpy as np
-import numpy as np
-
cimport cython
from cython cimport Py_ssize_t
-np.import_array()
-
+import numpy as np
+cimport numpy as cnp
from numpy cimport (ndarray,
int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t,
uint32_t, uint64_t, float32_t, float64_t)
+cnp.import_array()
+
cdef double NaN = <double> np.NaN
cdef double nan = NaN
diff --git a/pandas/_libs/skiplist.pyx b/pandas/_libs/skiplist.pyx
index c96413edfb0f2..5ede31b24118d 100644
--- a/pandas/_libs/skiplist.pyx
+++ b/pandas/_libs/skiplist.pyx
@@ -8,20 +8,20 @@
from libc.math cimport log
+import numpy as np
+cimport numpy as cnp
+from numpy cimport double_t
+cnp.import_array()
+
+
# MSVC does not have log2!
cdef double Log2(double x):
return log(x) / log(2.)
-cimport numpy as np
-import numpy as np
-from numpy cimport double_t
from random import random
-# initialize numpy
-np.import_array()
-
# TODO: optimize this, make less messy
cdef class Node:
diff --git a/pandas/_libs/sparse.pyx b/pandas/_libs/sparse.pyx
index bb8b0ed14e1d9..2abd270652433 100644
--- a/pandas/_libs/sparse.pyx
+++ b/pandas/_libs/sparse.pyx
@@ -1,12 +1,15 @@
-from numpy cimport (ndarray, uint8_t, int64_t, int32_t, int16_t, int8_t,
- float64_t, float32_t)
-cimport numpy as np
+# -*- coding: utf-8 -*-
+import operator
+import sys
cimport cython
import numpy as np
-import operator
-import sys
+cimport numpy as cnp
+from numpy cimport (ndarray, uint8_t, int64_t, int32_t, int16_t, int8_t,
+ float64_t, float32_t)
+cnp.import_array()
+
from distutils.version import LooseVersion
@@ -15,8 +18,6 @@ _np_version = np.version.short_version
_np_version_under1p10 = LooseVersion(_np_version) < LooseVersion('1.10')
_np_version_under1p11 = LooseVersion(_np_version) < LooseVersion('1.11')
-np.import_array()
-np.import_ufunc()
# -----------------------------------------------------------------------------
# Preamble stuff
diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx
index 557ca57145f2b..f14d508a625d0 100644
--- a/pandas/_libs/src/inference.pyx
+++ b/pandas/_libs/src/inference.pyx
@@ -609,13 +609,13 @@ cdef class Validator:
cdef:
Py_ssize_t n
- np.dtype dtype
+ cnp.dtype dtype
bint skipna
def __cinit__(
self,
Py_ssize_t n,
- np.dtype dtype=np.dtype(np.object_),
+ cnp.dtype dtype=np.dtype(np.object_),
bint skipna=False
):
self.n = n
@@ -823,7 +823,7 @@ cdef class TemporalValidator(Validator):
def __cinit__(
self,
Py_ssize_t n,
- np.dtype dtype=np.dtype(np.object_),
+ cnp.dtype dtype=np.dtype(np.object_),
bint skipna=False
):
self.n = n
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index c7035df8ac15c..81df7981096ba 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -1,10 +1,10 @@
# -*- coding: utf-8 -*-
# cython: profile=False
-cimport numpy as np
+cimport numpy as cnp
from numpy cimport int64_t, ndarray, float64_t
import numpy as np
-np.import_array()
+cnp.import_array()
from cpython cimport PyFloat_Check
diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx
index ebd5fc12775a4..ae52f7dd30165 100644
--- a/pandas/_libs/tslibs/ccalendar.pyx
+++ b/pandas/_libs/tslibs/ccalendar.pyx
@@ -8,10 +8,9 @@ Cython implementations of functions resembling the stdlib calendar module
cimport cython
from cython cimport Py_ssize_t
-import numpy as np
-cimport numpy as np
+cimport numpy as cnp
from numpy cimport int64_t, int32_t
-np.import_array()
+cnp.import_array()
# ----------------------------------------------------------------------
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 9cfe41172fedc..a32bfc1f6836c 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -5,9 +5,9 @@ cimport cython
from cython cimport Py_ssize_t
import numpy as np
-cimport numpy as np
+cimport numpy as cnp
from numpy cimport int64_t, int32_t, ndarray
-np.import_array()
+cnp.import_array()
import pytz
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index 18101c834c737..a8a865eec38dd 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -9,9 +9,9 @@ cimport cython
from cython cimport Py_ssize_t
import numpy as np
-cimport numpy as np
+cimport numpy as cnp
from numpy cimport ndarray, int64_t, int32_t, int8_t
-np.import_array()
+cnp.import_array()
from ccalendar cimport (get_days_in_month, is_leapyear, dayofweek,
diff --git a/pandas/_libs/tslibs/frequencies.pyx b/pandas/_libs/tslibs/frequencies.pyx
index cce3600371300..abaf8cad09bdb 100644
--- a/pandas/_libs/tslibs/frequencies.pyx
+++ b/pandas/_libs/tslibs/frequencies.pyx
@@ -4,10 +4,9 @@ import re
cimport cython
-import numpy as np
-cimport numpy as np
+cimport numpy as cnp
from numpy cimport int64_t
-np.import_array()
+cnp.import_array()
from util cimport is_integer_object, is_string_object
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 39f9437f0cecf..9f4ef4e515058 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -13,9 +13,9 @@ from cpython.datetime cimport (datetime,
PyDateTime_IMPORT
import numpy as np
-cimport numpy as np
+cimport numpy as cnp
from numpy cimport int64_t
-np.import_array()
+cnp.import_array()
from util cimport (get_nat,
is_integer_object, is_float_object,
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index a0ac6389c0646..e02818dd818df 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -10,9 +10,9 @@ from cpython.datetime cimport datetime, timedelta, time as dt_time
from dateutil.relativedelta import relativedelta
import numpy as np
-cimport numpy as np
+cimport numpy as cnp
from numpy cimport int64_t
-np.import_array()
+cnp.import_array()
from util cimport is_string_object, is_integer_object
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index a9a5500cd7447..09aeff852a0f2 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -18,9 +18,9 @@ from datetime import datetime
import time
import numpy as np
-cimport numpy as np
+cimport numpy as cnp
from numpy cimport int64_t, ndarray
-np.import_array()
+cnp.import_array()
# Avoid import from outside _libs
if sys.version_info.major == 2:
diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx
index 6eb867377bf54..b166babe5992c 100644
--- a/pandas/_libs/tslibs/resolution.pyx
+++ b/pandas/_libs/tslibs/resolution.pyx
@@ -4,9 +4,9 @@
from cython cimport Py_ssize_t
import numpy as np
-cimport numpy as np
+cimport numpy as cnp
from numpy cimport ndarray, int64_t
-np.import_array()
+cnp.import_array()
from util cimport is_string_object, get_nat
diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx
index 2921291973373..e7dabb94f8975 100644
--- a/pandas/_libs/tslibs/strptime.pyx
+++ b/pandas/_libs/tslibs/strptime.pyx
@@ -27,7 +27,6 @@ from cpython cimport PyFloat_Check
cimport cython
import numpy as np
-cimport numpy as np
from numpy cimport ndarray, int64_t
from datetime import date as datetime_date
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index b2c9c464c7cbf..1e6ea7794dfff 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -10,9 +10,9 @@ from cython cimport Py_ssize_t
from cpython cimport PyUnicode_Check, Py_NE, Py_EQ, PyObject_RichCompare
import numpy as np
-cimport numpy as np
+cimport numpy as cnp
from numpy cimport int64_t, ndarray
-np.import_array()
+cnp.import_array()
from cpython.datetime cimport (datetime, timedelta,
PyDateTime_CheckExact,
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 1ddb299598fd0..b9be9c16eb6c3 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -6,9 +6,9 @@ from cpython cimport (PyObject_RichCompareBool, PyObject_RichCompare,
Py_GT, Py_GE, Py_EQ, Py_NE, Py_LT, Py_LE)
import numpy as np
-cimport numpy as np
+cimport numpy as cnp
from numpy cimport int64_t, int32_t, ndarray
-np.import_array()
+cnp.import_array()
from datetime import time as datetime_time
from cpython.datetime cimport (datetime,
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx
index 242b8262a8721..c22e0b8e555a3 100644
--- a/pandas/_libs/tslibs/timezones.pyx
+++ b/pandas/_libs/tslibs/timezones.pyx
@@ -18,9 +18,9 @@ UTC = pytz.utc
import numpy as np
-cimport numpy as np
+cimport numpy as cnp
from numpy cimport ndarray, int64_t
-np.import_array()
+cnp.import_array()
# ----------------------------------------------------------------------
from util cimport is_string_object, is_integer_object, get_nat
diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx
index e46bf24c36f18..cacb073da581c 100644
--- a/pandas/_libs/window.pyx
+++ b/pandas/_libs/window.pyx
@@ -1,43 +1,40 @@
# cython: profile=False
# cython: boundscheck=False, wraparound=False, cdivision=True
+cimport cython
from cython cimport Py_ssize_t
-cimport numpy as np
+from libc.stdlib cimport malloc, free
+
import numpy as np
+cimport numpy as cnp
+from numpy cimport ndarray, double_t, int64_t, float64_t
+cnp.import_array()
-cimport cython
-np.import_array()
+cdef extern from "../src/headers/math.h":
+ int signbit(double) nogil
+ double sqrt(double x) nogil
cimport util
-
-from libc.stdlib cimport malloc, free
-
-from numpy cimport ndarray, double_t, int64_t, float64_t
+from util cimport numeric
from skiplist cimport (IndexableSkiplist,
node_t, skiplist_t,
skiplist_init, skiplist_destroy,
skiplist_get, skiplist_insert, skiplist_remove)
-cdef np.float32_t MINfloat32 = np.NINF
-cdef np.float64_t MINfloat64 = np.NINF
+cdef cnp.float32_t MINfloat32 = np.NINF
+cdef cnp.float64_t MINfloat64 = np.NINF
-cdef np.float32_t MAXfloat32 = np.inf
-cdef np.float64_t MAXfloat64 = np.inf
+cdef cnp.float32_t MAXfloat32 = np.inf
+cdef cnp.float64_t MAXfloat64 = np.inf
cdef double NaN = <double> np.NaN
cdef inline int int_max(int a, int b): return a if a >= b else b
cdef inline int int_min(int a, int b): return a if a <= b else b
-from util cimport numeric
-
-cdef extern from "../src/headers/math.h":
- int signbit(double) nogil
- double sqrt(double x) nogil
-
# Cython implementations of rolling sum, mean, variance, skewness,
# other statistical moment functions
diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx
index 41c03cb2799a3..e2a1107969990 100644
--- a/pandas/io/sas/sas.pyx
+++ b/pandas/io/sas/sas.pyx
@@ -2,16 +2,16 @@
# cython: boundscheck=False, initializedcheck=False
import numpy as np
-cimport numpy as np
-from numpy cimport uint8_t, uint16_t, int8_t, int64_t
+cimport numpy as cnp
+from numpy cimport uint8_t, uint16_t, int8_t, int64_t, ndarray
import sas_constants as const
# rle_decompress decompresses data using a Run Length Encoding
# algorithm. It is partially documented here:
#
# https://cran.r-project.org/web/packages/sas7bdat/vignettes/sas7bdat.pdf
-cdef np.ndarray[uint8_t, ndim=1] rle_decompress(
- int result_length, np.ndarray[uint8_t, ndim=1] inbuff):
+cdef ndarray[uint8_t, ndim=1] rle_decompress(
+ int result_length, ndarray[uint8_t, ndim=1] inbuff):
cdef:
uint8_t control_byte, x
@@ -114,8 +114,8 @@ cdef np.ndarray[uint8_t, ndim=1] rle_decompress(
# rdc_decompress decompresses data using the Ross Data Compression algorithm:
#
# http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm
-cdef np.ndarray[uint8_t, ndim=1] rdc_decompress(
- int result_length, np.ndarray[uint8_t, ndim=1] inbuff):
+cdef ndarray[uint8_t, ndim=1] rdc_decompress(
+ int result_length, ndarray[uint8_t, ndim=1] inbuff):
cdef:
uint8_t cmd
@@ -226,8 +226,8 @@ cdef class Parser(object):
int subheader_pointer_length
int current_page_type
bint is_little_endian
- np.ndarray[uint8_t, ndim=1] (*decompress)(
- int result_length, np.ndarray[uint8_t, ndim=1] inbuff)
+ ndarray[uint8_t, ndim=1] (*decompress)(
+ int result_length, ndarray[uint8_t, ndim=1] inbuff)
object parser
def __init__(self, object parser):
@@ -391,7 +391,7 @@ cdef class Parser(object):
Py_ssize_t j
int s, k, m, jb, js, current_row
int64_t lngt, start, ct
- np.ndarray[uint8_t, ndim=1] source
+ ndarray[uint8_t, ndim=1] source
int64_t[:] column_types
int64_t[:] lengths
int64_t[:] offsets
| In a few files this collects scattered numpy imports and puts them all in one place.
Removes unnecessary `np.import_ufunc()` calls.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19405 | 2018-01-26T01:19:49Z | 2018-01-27T01:09:11Z | 2018-01-27T01:09:11Z | 2018-01-31T06:49:26Z |
Make DateOffset.kwds a property | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 66e88e181ac0f..9cebe09280eba 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -341,6 +341,7 @@ Other API Changes
- :func:`DatetimeIndex.shift` and :func:`TimedeltaIndex.shift` will now raise ``NullFrequencyError`` (which subclasses ``ValueError``, which was raised in older versions) when the index object frequency is ``None`` (:issue:`19147`)
- Addition and subtraction of ``NaN`` from a :class:`Series` with ``dtype='timedelta64[ns]'`` will raise a ``TypeError` instead of treating the ``NaN`` as ``NaT`` (:issue:`19274`)
- Set operations (union, difference...) on :class:`IntervalIndex` with incompatible index types will now raise a ``TypeError`` rather than a ``ValueError`` (:issue:`19329`)
+- :class:`DateOffset` objects render more simply, e.g. "<DateOffset: days=1>" instead of "<DateOffset: kwds={'days': 1}>" (:issue:`19403`)
.. _whatsnew_0230.deprecations:
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index e02818dd818df..8caf9ea0e0389 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -302,6 +302,14 @@ class _BaseOffset(object):
_normalize_cache = True
_cacheable = False
_day_opt = None
+ _attributes = frozenset(['n', 'normalize'])
+
+ @property
+ def kwds(self):
+ # for backwards-compatibility
+ kwds = {name: getattr(self, name, None) for name in self._attributes
+ if name not in ['n', 'normalize']}
+ return {name: kwds[name] for name in kwds if kwds[name] is not None}
def __call__(self, other):
return self.apply(other)
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 8dd41c022d163..76219a07f4943 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -71,9 +71,11 @@ def f(self):
if field in ['is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end',
'is_year_start', 'is_year_end']:
- month_kw = (self.freq.kwds.get('startingMonth',
- self.freq.kwds.get('month', 12))
- if self.freq else 12)
+ freq = self.freq
+ month_kw = 12
+ if freq:
+ kwds = freq.kwds
+ month_kw = kwds.get('startingMonth', kwds.get('month', 12))
result = fields.get_start_end_field(values, field,
self.freqstr, month_kw)
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index b086884ecd250..d96ebab615d12 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -218,7 +218,7 @@ def test_offset_freqstr(self, offset_types):
freqstr = offset.freqstr
if freqstr not in ('<Easter>',
- "<DateOffset: kwds={'days': 1}>",
+ "<DateOffset: days=1>",
'LWOM-SAT', ):
code = get_offset(freqstr)
assert offset.rule_code == code
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index ec206e0997d0b..2e4be7fbdeebf 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -185,6 +185,8 @@ def __add__(date):
"""
_use_relativedelta = False
_adjust_dst = False
+ _attributes = frozenset(['n', 'normalize'] +
+ list(liboffsets.relativedelta_kwds))
# default for prior pickles
normalize = False
@@ -192,9 +194,9 @@ def __add__(date):
def __init__(self, n=1, normalize=False, **kwds):
self.n = self._validate_n(n)
self.normalize = normalize
- self.kwds = kwds
self._offset, self._use_relativedelta = _determine_offset(kwds)
+ self.__dict__.update(kwds)
@apply_wraps
def apply(self, other):
@@ -238,30 +240,31 @@ def apply_index(self, i):
y : DatetimeIndex
"""
- if not type(self) is DateOffset:
+ if type(self) is not DateOffset:
raise NotImplementedError("DateOffset subclass {name} "
"does not have a vectorized "
"implementation".format(
name=self.__class__.__name__))
+ kwds = self.kwds
relativedelta_fast = set(['years', 'months', 'weeks',
'days', 'hours', 'minutes',
'seconds', 'microseconds'])
# relativedelta/_offset path only valid for base DateOffset
if (self._use_relativedelta and
- set(self.kwds).issubset(relativedelta_fast)):
+ set(kwds).issubset(relativedelta_fast)):
- months = ((self.kwds.get('years', 0) * 12 +
- self.kwds.get('months', 0)) * self.n)
+ months = ((kwds.get('years', 0) * 12 +
+ kwds.get('months', 0)) * self.n)
if months:
shifted = liboffsets.shift_months(i.asi8, months)
i = i._shallow_copy(shifted)
- weeks = (self.kwds.get('weeks', 0)) * self.n
+ weeks = (kwds.get('weeks', 0)) * self.n
if weeks:
i = (i.to_period('W') + weeks).to_timestamp() + \
i.to_perioddelta('W')
- timedelta_kwds = {k: v for k, v in self.kwds.items()
+ timedelta_kwds = {k: v for k, v in kwds.items()
if k in ['days', 'hours', 'minutes',
'seconds', 'microseconds']}
if timedelta_kwds:
@@ -273,7 +276,7 @@ def apply_index(self, i):
return i + (self._offset * self.n)
else:
# relativedelta with other keywords
- kwd = set(self.kwds) - relativedelta_fast
+ kwd = set(kwds) - relativedelta_fast
raise NotImplementedError("DateOffset with relativedelta "
"keyword(s) {kwd} not able to be "
"applied vectorized".format(kwd=kwd))
@@ -284,7 +287,7 @@ def isAnchored(self):
return (self.n == 1)
def _params(self):
- all_paras = dict(list(vars(self).items()) + list(self.kwds.items()))
+ all_paras = self.__dict__.copy()
if 'holidays' in all_paras and not all_paras['holidays']:
all_paras.pop('holidays')
exclude = ['kwds', 'name', 'normalize', 'calendar']
@@ -301,15 +304,8 @@ def _repr_attrs(self):
exclude = set(['n', 'inc', 'normalize'])
attrs = []
for attr in sorted(self.__dict__):
- if attr.startswith('_'):
+ if attr.startswith('_') or attr == 'kwds':
continue
- elif attr == 'kwds': # TODO: get rid of this
- kwds_new = {}
- for key in self.kwds:
- if not hasattr(self, key):
- kwds_new[key] = self.kwds[key]
- if len(kwds_new) > 0:
- attrs.append('kwds={kwds_new}'.format(kwds_new=kwds_new))
elif attr not in exclude:
value = getattr(self, attr)
attrs.append('{attr}={value}'.format(attr=attr, value=value))
@@ -427,6 +423,30 @@ def _offset_str(self):
def nanos(self):
raise ValueError("{name} is a non-fixed frequency".format(name=self))
+ def __setstate__(self, state):
+ """Reconstruct an instance from a pickled state"""
+ if 'offset' in state:
+ # Older (<0.22.0) versions have offset attribute instead of _offset
+ if '_offset' in state: # pragma: no cover
+ raise AssertionError('Unexpected key `_offset`')
+ state['_offset'] = state.pop('offset')
+ state['kwds']['offset'] = state['_offset']
+
+ if '_offset' in state and not isinstance(state['_offset'], timedelta):
+ # relativedelta, we need to populate using its kwds
+ offset = state['_offset']
+ odict = offset.__dict__
+ kwds = {key: odict[key] for key in odict if odict[key]}
+ state.update(kwds)
+
+ self.__dict__ = state
+ if 'weekmask' in state and 'holidays' in state:
+ calendar, holidays = _get_calendar(weekmask=self.weekmask,
+ holidays=self.holidays,
+ calendar=None)
+ self.calendar = calendar
+ self.holidays = holidays
+
class SingleConstructorOffset(DateOffset):
@classmethod
@@ -450,10 +470,9 @@ def __init__(self, weekmask, holidays, calendar):
# following two attributes. See DateOffset._params()
# holidays, weekmask
- # assumes self.kwds already exists
- self.kwds['weekmask'] = self.weekmask = weekmask
- self.kwds['holidays'] = self.holidays = holidays
- self.kwds['calendar'] = self.calendar = calendar
+ self.weekmask = weekmask
+ self.holidays = holidays
+ self.calendar = calendar
class BusinessMixin(object):
@@ -490,23 +509,6 @@ def __getstate__(self):
return state
- def __setstate__(self, state):
- """Reconstruct an instance from a pickled state"""
- if 'offset' in state:
- # Older versions have offset attribute instead of _offset
- if '_offset' in state: # pragma: no cover
- raise ValueError('Unexpected key `_offset`')
- state['_offset'] = state.pop('offset')
- state['kwds']['offset'] = state['_offset']
- self.__dict__ = state
- if 'weekmask' in state and 'holidays' in state:
- calendar, holidays = _get_calendar(weekmask=self.weekmask,
- holidays=self.holidays,
- calendar=None)
- self.kwds['calendar'] = self.calendar = calendar
- self.kwds['holidays'] = self.holidays = holidays
- self.kwds['weekmask'] = state['weekmask']
-
class BusinessDay(BusinessMixin, SingleConstructorOffset):
"""
@@ -514,11 +516,11 @@ class BusinessDay(BusinessMixin, SingleConstructorOffset):
"""
_prefix = 'B'
_adjust_dst = True
+ _attributes = frozenset(['n', 'normalize', 'offset'])
def __init__(self, n=1, normalize=False, offset=timedelta(0)):
self.n = self._validate_n(n)
self.normalize = normalize
- self.kwds = {'offset': offset}
self._offset = offset
def _offset_str(self):
@@ -615,10 +617,8 @@ class BusinessHourMixin(BusinessMixin):
def __init__(self, start='09:00', end='17:00', offset=timedelta(0)):
# must be validated here to equality check
- kwds = {'offset': offset}
- self.start = kwds['start'] = liboffsets._validate_business_time(start)
- self.end = kwds['end'] = liboffsets._validate_business_time(end)
- self.kwds.update(kwds)
+ self.start = liboffsets._validate_business_time(start)
+ self.end = liboffsets._validate_business_time(end)
self._offset = offset
@cache_readonly
@@ -843,12 +843,12 @@ class BusinessHour(BusinessHourMixin, SingleConstructorOffset):
"""
_prefix = 'BH'
_anchor = 0
+ _attributes = frozenset(['n', 'normalize', 'start', 'end', 'offset'])
def __init__(self, n=1, normalize=False, start='09:00',
end='17:00', offset=timedelta(0)):
self.n = self._validate_n(n)
self.normalize = normalize
- self.kwds = {}
super(BusinessHour, self).__init__(start=start, end=end, offset=offset)
@@ -872,13 +872,14 @@ class CustomBusinessDay(_CustomMixin, BusinessDay):
"""
_cacheable = False
_prefix = 'C'
+ _attributes = frozenset(['n', 'normalize',
+ 'weekmask', 'holidays', 'calendar', 'offset'])
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, offset=timedelta(0)):
self.n = self._validate_n(n)
self.normalize = normalize
self._offset = offset
- self.kwds = {'offset': offset}
_CustomMixin.__init__(self, weekmask, holidays, calendar)
@@ -930,6 +931,9 @@ class CustomBusinessHour(_CustomMixin, BusinessHourMixin,
"""
_prefix = 'CBH'
_anchor = 0
+ _attributes = frozenset(['n', 'normalize',
+ 'weekmask', 'holidays', 'calendar',
+ 'start', 'end', 'offset'])
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None,
@@ -937,7 +941,6 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.n = self._validate_n(n)
self.normalize = normalize
self._offset = offset
- self.kwds = {'offset': offset}
_CustomMixin.__init__(self, weekmask, holidays, calendar)
BusinessHourMixin.__init__(self, start=start, end=end, offset=offset)
@@ -949,11 +952,11 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
class MonthOffset(SingleConstructorOffset):
_adjust_dst = True
+ _attributes = frozenset(['n', 'normalize'])
def __init__(self, n=1, normalize=False):
self.n = self._validate_n(n)
self.normalize = normalize
- self.kwds = {}
@property
def name(self):
@@ -1024,6 +1027,8 @@ class _CustomBusinessMonth(_CustomMixin, BusinessMixin, MonthOffset):
calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_cacheable = False
+ _attributes = frozenset(['n', 'normalize',
+ 'weekmask', 'holidays', 'calendar', 'offset'])
onOffset = DateOffset.onOffset # override MonthOffset method
apply_index = DateOffset.apply_index # override MonthOffset method
@@ -1033,7 +1038,6 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.n = self._validate_n(n)
self.normalize = normalize
self._offset = offset
- self.kwds = {'offset': offset}
_CustomMixin.__init__(self, weekmask, holidays, calendar)
@@ -1102,6 +1106,7 @@ class SemiMonthOffset(DateOffset):
_adjust_dst = True
_default_day_of_month = 15
_min_day_of_month = 2
+ _attributes = frozenset(['n', 'normalize', 'day_of_month'])
def __init__(self, n=1, normalize=False, day_of_month=None):
if day_of_month is None:
@@ -1115,7 +1120,6 @@ def __init__(self, n=1, normalize=False, day_of_month=None):
self.n = self._validate_n(n)
self.normalize = normalize
- self.kwds = {'day_of_month': self.day_of_month}
@classmethod
def _from_name(cls, suffix=None):
@@ -1319,6 +1323,7 @@ class Week(DateOffset):
_adjust_dst = True
_inc = timedelta(weeks=1)
_prefix = 'W'
+ _attributes = frozenset(['n', 'normalize', 'weekday'])
def __init__(self, n=1, normalize=False, weekday=None):
self.n = self._validate_n(n)
@@ -1330,8 +1335,6 @@ def __init__(self, n=1, normalize=False, weekday=None):
raise ValueError('Day must be 0<=day<=6, got {day}'
.format(day=self.weekday))
- self.kwds = {'weekday': weekday}
-
def isAnchored(self):
return (self.n == 1 and self.weekday is not None)
@@ -1450,6 +1453,7 @@ class WeekOfMonth(_WeekOfMonthMixin, DateOffset):
"""
_prefix = 'WOM'
_adjust_dst = True
+ _attributes = frozenset(['n', 'normalize', 'week', 'weekday'])
def __init__(self, n=1, normalize=False, week=0, weekday=0):
self.n = self._validate_n(n)
@@ -1467,8 +1471,6 @@ def __init__(self, n=1, normalize=False, week=0, weekday=0):
raise ValueError('Week must be 0<=week<=3, got {week}'
.format(week=self.week))
- self.kwds = {'weekday': weekday, 'week': week}
-
def _get_offset_day(self, other):
"""
Find the day in the same month as other that has the same
@@ -1526,6 +1528,7 @@ class LastWeekOfMonth(_WeekOfMonthMixin, DateOffset):
"""
_prefix = 'LWOM'
_adjust_dst = True
+ _attributes = frozenset(['n', 'normalize', 'weekday'])
def __init__(self, n=1, normalize=False, weekday=0):
self.n = self._validate_n(n)
@@ -1539,8 +1542,6 @@ def __init__(self, n=1, normalize=False, weekday=0):
raise ValueError('Day must be 0<=day<=6, got {day}'
.format(day=self.weekday))
- self.kwds = {'weekday': weekday}
-
def _get_offset_day(self, other):
"""
Find the day in the same month as other that has the same
@@ -1584,6 +1585,7 @@ class QuarterOffset(DateOffset):
_default_startingMonth = None
_from_name_startingMonth = None
_adjust_dst = True
+ _attributes = frozenset(['n', 'normalize', 'startingMonth'])
# TODO: Consider combining QuarterOffset and YearOffset __init__ at some
# point. Also apply_index, onOffset, rule_code if
# startingMonth vs month attr names are resolved
@@ -1595,8 +1597,6 @@ def __init__(self, n=1, normalize=False, startingMonth=None):
startingMonth = self._default_startingMonth
self.startingMonth = startingMonth
- self.kwds = {'startingMonth': startingMonth}
-
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@@ -1690,6 +1690,7 @@ class QuarterBegin(QuarterOffset):
class YearOffset(DateOffset):
"""DateOffset that just needs a month"""
_adjust_dst = True
+ _attributes = frozenset(['n', 'normalize', 'month'])
def _get_offset_day(self, other):
# override BaseOffset method to use self.month instead of other.month
@@ -1725,8 +1726,6 @@ def __init__(self, n=1, normalize=False, month=None):
if self.month < 1 or self.month > 12:
raise ValueError('Month must go from 1 to 12')
- self.kwds = {'month': month}
-
@classmethod
def _from_name(cls, suffix=None):
kwargs = {}
@@ -1811,6 +1810,7 @@ class FY5253(DateOffset):
"""
_prefix = 'RE'
_adjust_dst = True
+ _attributes = frozenset(['weekday', 'startingMonth', 'variation'])
def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1,
variation="nearest"):
@@ -1821,9 +1821,6 @@ def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1,
self.variation = variation
- self.kwds = {'weekday': weekday, 'startingMonth': startingMonth,
- 'variation': variation}
-
if self.n == 0:
raise ValueError('N cannot be 0')
@@ -2012,6 +2009,8 @@ class FY5253Quarter(DateOffset):
_prefix = 'REQ'
_adjust_dst = True
+ _attributes = frozenset(['weekday', 'startingMonth', 'qtr_with_extra_week',
+ 'variation'])
def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1,
qtr_with_extra_week=1, variation="nearest"):
@@ -2023,10 +2022,6 @@ def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1,
self.qtr_with_extra_week = qtr_with_extra_week
self.variation = variation
- self.kwds = {'weekday': weekday, 'startingMonth': startingMonth,
- 'qtr_with_extra_week': qtr_with_extra_week,
- 'variation': variation}
-
if self.n == 0:
raise ValueError('N cannot be 0')
@@ -2170,11 +2165,11 @@ class Easter(DateOffset):
1583-4099.
"""
_adjust_dst = True
+ _attributes = frozenset(['n', 'normalize'])
def __init__(self, n=1, normalize=False):
self.n = self._validate_n(n)
self.normalize = normalize
- self.kwds = {}
@apply_wraps
def apply(self, other):
@@ -2217,12 +2212,12 @@ def f(self, other):
class Tick(SingleConstructorOffset):
_inc = Timedelta(microseconds=1000)
_prefix = 'undefined'
+ _attributes = frozenset(['n', 'normalize'])
def __init__(self, n=1, normalize=False):
# TODO: do Tick classes with normalize=True make sense?
self.n = self._validate_n(n)
self.normalize = normalize
- self.kwds = {}
__gt__ = _tick_comp(operator.gt)
__ge__ = _tick_comp(operator.ge)
| Returning to an older goal of making DateOffset immutable, this PR moves towards getting rid of `DateOffset.kwds` by making it a property instead of regular attribute. This uses the `_get_attributes_dict` pattern, albeit without actually using a `_get_attributes_dict` method.
I expect this to entail a small perf penalty since lookups are slower, but that's small potatoes next to the speedups we'll get from caching once these are immutable.
```
asv continuous -E virtualenv -f 1.1 master HEAD -b offset -b timeseries
[...]
before after ratio
[d3f7d2a6] [fe7a7187]
+ 11.46μs 41.89μs 3.66 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<DateOffset: kwds={'months': 2, 'days': 2}>)
+ 2.03ms 5.53ms 2.72 timeseries.ResampleSeries.time_resample('datetime', '5min', 'mean')
+ 2.15ms 4.68ms 2.17 timeseries.ResampleSeries.time_resample('datetime', '1D', 'ohlc')
+ 14.36μs 29.72μs 2.07 offset.OffestDatetimeArithmetic.time_add_10(<YearEnd: month=12>)
+ 2.56ms 4.42ms 1.73 timeseries.ToDatetimeCache.time_dup_string_tzoffset_dates(True)
+ 11.83μs 18.85μs 1.59 offset.OffestDatetimeArithmetic.time_add(<DateOffset: kwds={'months': 2, 'days': 2}>)
+ 9.60μs 14.96μs 1.56 offset.OffestDatetimeArithmetic.time_apply(<DateOffset: kwds={'months': 2, 'days': 2}>)
+ 28.55μs 42.49μs 1.49 offset.OffestDatetimeArithmetic.time_subtract(<DateOffset: kwds={'months': 2, 'days': 2}>)
+ 12.76μs 18.56μs 1.45 offset.OffestDatetimeArithmetic.time_add(<BusinessYearEnd: month=12>)
+ 7.69μs 10.75μs 1.40 timeseries.AsOf.time_asof_single_early('Series')
+ 8.71ms 11.63ms 1.34 timeseries.ResampleSeries.time_resample('period', '5min', 'mean')
+ 15.21μs 20.20μs 1.33 offset.OffestDatetimeArithmetic.time_add_10(<MonthEnd>)
+ 14.20μs 18.47μs 1.30 offset.OffestDatetimeArithmetic.time_add_10(<BusinessQuarterEnd: startingMonth=3>)
+ 17.44μs 21.72μs 1.25 offset.OffestDatetimeArithmetic.time_subtract_10(<SemiMonthBegin: day_of_month=15>)
+ 11.22μs 13.54μs 1.21 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<YearEnd: month=12>)
+ 13.63μs 16.37μs 1.20 offset.OffestDatetimeArithmetic.time_subtract(<BusinessMonthBegin>)
+ 10.22μs 12.27μs 1.20 offset.OffestDatetimeArithmetic.time_apply(<BusinessDay>)
+ 65.62μs 78.64μs 1.20 offset.OffestDatetimeArithmetic.time_add_10(<DateOffset: kwds={'months': 2, 'days': 2}>)
+ 14.46μs 17.28μs 1.19 offset.OffestDatetimeArithmetic.time_subtract(<QuarterBegin: startingMonth=3>)
+ 16.11μs 19.14μs 1.19 offset.OffestDatetimeArithmetic.time_subtract(<BusinessDay>)
+ 16.31μs 19.07μs 1.17 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessYearBegin: month=1>)
+ 2.35ms 2.73ms 1.16 timeseries.DatetimeIndex.time_unique('tz_naive')
+ 15.43μs 17.89μs 1.16 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessMonthBegin>)
+ 10.68μs 12.38μs 1.16 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<MonthEnd>)
+ 10.57μs 12.24μs 1.16 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessMonthEnd>)
+ 14.26μs 16.48μs 1.16 offset.OffestDatetimeArithmetic.time_add_10(<MonthBegin>)
+ 121.98μs 140.64μs 1.15 offset.OffestDatetimeArithmetic.time_subtract_10(<DateOffset: kwds={'months': 2, 'days': 2}>)
+ 16.60μs 19.11μs 1.15 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessYearEnd: month=12>)
+ 15.66μs 18.00μs 1.15 offset.OffestDatetimeArithmetic.time_subtract(<YearEnd: month=12>)
+ 16.78μs 19.25μs 1.15 offset.OffestDatetimeArithmetic.time_subtract(<SemiMonthEnd: day_of_month=15>)
+ 13.87μs 15.89μs 1.15 offset.OffestDatetimeArithmetic.time_add_10(<BusinessYearEnd: month=12>)
+ 14.96μs 17.13μs 1.14 offset.OffestDatetimeArithmetic.time_subtract(<BusinessQuarterBegin: startingMonth=3>)
+ 15.27μs 17.43μs 1.14 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessMonthEnd>)
+ 12.88μs 14.66μs 1.14 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessDay>)
+ 251.65μs 285.47μs 1.13 offset.OffsetDatetimeIndexArithmetic.time_add_offset(<BusinessQuarterEnd: startingMonth=3>)
+ 13.84μs 15.66μs 1.13 offset.OffestDatetimeArithmetic.time_add_10(<YearBegin: month=1>)
+ 14.68ms 16.59ms 1.13 timeseries.DatetimeIndex.time_to_time('tz_naive')
+ 16.66μs 18.79μs 1.13 offset.OffestDatetimeArithmetic.time_subtract_10(<YearEnd: month=12>)
+ 10.00μs 11.19μs 1.12 offset.OffestDatetimeArithmetic.time_apply(<BusinessYearBegin: month=1>)
+ 10.30μs 11.50μs 1.12 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessYearEnd: month=12>)
+ 17.44μs 19.47μs 1.12 offset.OffestDatetimeArithmetic.time_subtract(<SemiMonthBegin: day_of_month=15>)
+ 18.07μs 20.13μs 1.11 offset.OffestDatetimeArithmetic.time_add_10(<SemiMonthEnd: day_of_month=15>)
+ 18.09μs 20.08μs 1.11 offset.OffestDatetimeArithmetic.time_add(<CustomBusinessDay>)
+ 14.65μs 16.26μs 1.11 offset.OffestDatetimeArithmetic.time_add_10(<BusinessMonthEnd>)
+ 5.32ms 5.91ms 1.11 offset.OffsetDatetimeIndexArithmetic.time_add_offset(<SemiMonthBegin: day_of_month=15>)
+ 11.54μs 12.80μs 1.11 offset.OffestDatetimeArithmetic.time_add(<MonthBegin>)
+ 12.23μs 13.55μs 1.11 offset.OffestDatetimeArithmetic.time_add(<BusinessQuarterEnd: startingMonth=3>)
+ 15.25μs 16.87μs 1.11 offset.OffestDatetimeArithmetic.time_subtract_10(<MonthEnd>)
+ 17.78μs 19.66μs 1.11 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessDay>)
+ 18.13μs 20.05μs 1.11 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessQuarterEnd: startingMonth=3>)
+ 21.89μs 24.19μs 1.10 offset.OffestDatetimeArithmetic.time_subtract_10(<Day>)
+ 9.94μs 10.97μs 1.10 offset.OffestDatetimeArithmetic.time_apply(<SemiMonthEnd: day_of_month=15>)
+ 10.74μs 11.85μs 1.10 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessYearBegin: month=1>)
+ 18.51μs 20.39μs 1.10 offset.OffestDatetimeArithmetic.time_subtract_10(<SemiMonthEnd: day_of_month=15>)
+ 39.52ms 43.52ms 1.10 timeseries.Factorize.time_factorize('Asia/Tokyo')
- 16.22μs 14.75μs 0.91 offset.OffestDatetimeArithmetic.time_add_10(<QuarterBegin: startingMonth=3>)
- 73.13ms 65.36ms 0.89 timeseries.ToDatetimeCache.time_dup_string_tzoffset_dates(False)
- 13.39μs 11.60μs 0.87 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessQuarterBegin: startingMonth=3>)
- 25.04μs 21.37μs 0.85 offset.OffestDatetimeArithmetic.time_add_10(<Day>)
- 14.25μs 11.66μs 0.82 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<QuarterEnd: startingMonth=3>)
- 44.47ms 34.93ms 0.79 timeseries.Factorize.time_factorize(None)
- 1.98ms 1.45ms 0.73 timeseries.ToDatetimeCache.time_dup_string_dates(False)
- 4.23ms 1.69ms 0.40 timeseries.ResampleSeries.time_resample('datetime', '1D', 'mean')
asv continuous -E virtualenv -f 1.1 master HEAD -b offset -b timeseries
[...]
+ 17.01μs 35.30μs 2.08 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessQuarterBegin: startingMonth=3>)
+ 3.02ms 4.90ms 1.62 timeseries.AsOf.time_asof_nan_single('DataFrame')
+ 77.45ms 124.97ms 1.61 timeseries.Factorize.time_factorize('Asia/Tokyo')
+ 12.33μs 19.34μs 1.57 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<DateOffset: days=2, months=2>)
+ 9.65μs 14.70μs 1.52 offset.OffestDatetimeArithmetic.time_apply(<DateOffset: days=2, months=2>)
+ 11.65μs 17.30μs 1.48 offset.OffestDatetimeArithmetic.time_add(<DateOffset: days=2, months=2>)
+ 27.81μs 40.60μs 1.46 offset.OffestDatetimeArithmetic.time_subtract(<DateOffset: days=2, months=2>)
+ 71.60μs 91.28μs 1.27 offset.OffestDatetimeArithmetic.time_add(<CustomBusinessMonthEnd>)
+ 92.47μs 113.39μs 1.23 offset.OffestDatetimeArithmetic.time_subtract(<CustomBusinessMonthBegin>)
+ 18.89μs 23.02μs 1.22 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<Day>)
+ 10.49μs 12.66μs 1.21 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessMonthBegin>)
+ 16.88μs 20.14μs 1.19 offset.OffestDatetimeArithmetic.time_subtract(<SemiMonthEnd: day_of_month=15>)
+ 15.45μs 18.27μs 1.18 offset.OffestDatetimeArithmetic.time_add_10(<QuarterEnd: startingMonth=3>)
+ 8.37μs 9.90μs 1.18 offset.OffestDatetimeArithmetic.time_apply(<MonthBegin>)
+ 62.24μs 73.22μs 1.18 offset.OffestDatetimeArithmetic.time_add_10(<DateOffset: days=2, months=2>)
+ 21.38μs 24.98μs 1.17 offset.OffestDatetimeArithmetic.time_add_10(<Day>)
+ 16.28μs 18.91μs 1.16 offset.OffestDatetimeArithmetic.time_subtract_10(<QuarterBegin: startingMonth=3>)
+ 12.20μs 14.11μs 1.16 offset.OffestDatetimeArithmetic.time_add(<BusinessQuarterEnd: startingMonth=3>)
+ 11.51μs 13.30μs 1.16 offset.OffestDatetimeArithmetic.time_add(<BusinessYearBegin: month=1>)
+ 12.89μs 14.83μs 1.15 offset.OffestDatetimeArithmetic.time_add(<SemiMonthBegin: day_of_month=15>)
+ 14.39μs 16.51μs 1.15 offset.OffestDatetimeArithmetic.time_add_10(<QuarterBegin: startingMonth=3>)
+ 10.50μs 12.03μs 1.15 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<YearEnd: month=12>)
+ 2.32ms 2.66ms 1.14 timeseries.DatetimeIndex.time_unique('tz_naive')
+ 15.92μs 18.08μs 1.14 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessMonthEnd>)
+ 123.41μs 140.13μs 1.14 offset.OffestDatetimeArithmetic.time_subtract_10(<DateOffset: days=2, months=2>)
+ 71.20ms 80.52ms 1.13 offset.OffsetSeriesArithmetic.time_add_offset(<CustomBusinessMonthEnd>)
+ 17.68μs 19.89μs 1.13 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessYearBegin: month=1>)
+ 15.36μs 17.23μs 1.12 offset.OffestDatetimeArithmetic.time_subtract_10(<MonthBegin>)
+ 15.98μs 17.82μs 1.12 offset.OffestDatetimeArithmetic.time_add_10(<SemiMonthBegin: day_of_month=15>)
+ 14.64μs 16.31μs 1.11 offset.OffestDatetimeArithmetic.time_add_10(<BusinessMonthEnd>)
+ 8.96μs 9.94μs 1.11 offset.OffestDatetimeArithmetic.time_apply(<BusinessYearEnd: month=12>)
+ 17.27μs 19.16μs 1.11 offset.OffestDatetimeArithmetic.time_subtract_10(<YearEnd: month=12>)
+ 14.44μs 16.01μs 1.11 offset.OffestDatetimeArithmetic.time_subtract(<BusinessYearBegin: month=1>)
+ 15.26μs 16.90μs 1.11 offset.OffestDatetimeArithmetic.time_add_10(<BusinessDay>)
+ 14.19μs 15.70μs 1.11 offset.OffestDatetimeArithmetic.time_subtract(<MonthBegin>)
+ 13.72μs 15.16μs 1.11 offset.OffestDatetimeArithmetic.time_add_10(<MonthEnd>)
+ 127.38μs 140.62μs 1.10 offset.OffestDatetimeArithmetic.time_add_10(<CustomBusinessMonthBegin>)
+ 13.77μs 15.20μs 1.10 offset.OffestDatetimeArithmetic.time_subtract(<BusinessMonthEnd>)
- 17.66μs 15.89μs 0.90 offset.OffestDatetimeArithmetic.time_subtract_10(<MonthEnd>)
- 4.07ms 3.66ms 0.90 timeseries.ToDatetimeISO8601.time_iso8601_nosep
- 25.01μs 22.41μs 0.90 offset.OffestDatetimeArithmetic.time_subtract_10(<Day>)
- 11.44μs 10.22μs 0.89 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<MonthEnd>)
- 18.64μs 16.19μs 0.87 timeseries.AsOf.time_asof_single('Series')
- 92.96ms 77.71ms 0.84 timeseries.Factorize.time_factorize(None)
- 23.06μs 19.02μs 0.83 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessYearEnd: month=12>)
- 13.05μs 10.37μs 0.79 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<YearBegin: month=1>)
- 23.95μs 14.14μs 0.59 offset.OffestDatetimeArithmetic.time_add(<MonthBegin>)
- 444.49μs 260.00μs 0.58 offset.OffsetDatetimeIndexArithmetic.time_add_offset(<BusinessYearEnd: month=12>)
- 4.31ms 2.46ms 0.57 timeseries.ToDatetimeCache.time_unique_seconds_and_unit(False)
- 21.43μs 11.69μs 0.55 offset.OffestDatetimeArithmetic.time_apply(<BusinessDay>)
asv continuous -E virtualenv -f 1.1 master HEAD -b offset -b timeseries
[...]
before after ratio
[d3f7d2a6] [fe7a7187]
+ 16.58μs 26.15μs 1.58 offset.OffestDatetimeArithmetic.time_subtract(<BusinessQuarterEnd: startingMonth=3>)
+ 9.75μs 14.73μs 1.51 offset.OffestDatetimeArithmetic.time_apply(<DateOffset: days=2, months=2>)
+ 12.16μs 18.11μs 1.49 offset.OffestDatetimeArithmetic.time_add(<DateOffset: days=2, months=2>)
+ 12.21μs 17.69μs 1.45 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<DateOffset: days=2, months=2>)
+ 28.97μs 40.89μs 1.41 offset.OffestDatetimeArithmetic.time_subtract(<DateOffset: days=2, months=2>)
+ 1.63ms 2.25ms 1.38 timeseries.ResampleSeries.time_resample('datetime', '1D', 'mean')
+ 14.71μs 20.02μs 1.36 offset.OffestDatetimeArithmetic.time_subtract(<BusinessYearBegin: month=1>)
+ 14.63μs 19.70μs 1.35 offset.OffestDatetimeArithmetic.time_subtract(<BusinessMonthBegin>)
+ 19.52μs 26.26μs 1.35 offset.OffestDatetimeArithmetic.time_add(<CustomBusinessDay>)
+ 22.16ms 29.79ms 1.34 offset.OffsetSeriesArithmetic.time_add_offset(<CustomBusinessDay>)
+ 20.69ms 26.91ms 1.30 timeseries.IrregularOps.time_add
+ 16.77μs 20.77μs 1.24 offset.OffestDatetimeArithmetic.time_subtract(<SemiMonthEnd: day_of_month=15>)
+ 15.76μs 19.40μs 1.23 offset.OffestDatetimeArithmetic.time_subtract_10(<MonthBegin>)
+ 17.31μs 21.15μs 1.22 offset.OffestDatetimeArithmetic.time_subtract_10(<QuarterBegin: startingMonth=3>)
+ 15.31μs 18.65μs 1.22 offset.OffestDatetimeArithmetic.time_subtract(<BusinessQuarterBegin: startingMonth=3>)
+ 14.71μs 17.75μs 1.21 offset.OffestDatetimeArithmetic.time_add_10(<YearBegin: month=1>)
+ 15.88μs 19.10μs 1.20 offset.OffestDatetimeArithmetic.time_subtract(<BusinessDay>)
+ 6.38ms 7.55ms 1.18 timeseries.AsOf.time_asof('DataFrame')
+ 16.25μs 18.97μs 1.17 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessQuarterEnd: startingMonth=3>)
+ 66.50μs 76.79μs 1.15 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<CustomBusinessMonthEnd>)
+ 19.02μs 21.70μs 1.14 offset.OffestDatetimeArithmetic.time_add(<Day>)
+ 13.95μs 15.83μs 1.13 offset.OffestDatetimeArithmetic.time_subtract(<MonthEnd>)
+ 18.43μs 20.88μs 1.13 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<Day>)
+ 64.88μs 72.92μs 1.12 offset.OffestDatetimeArithmetic.time_add_10(<DateOffset: days=2, months=2>)
+ 12.91μs 14.43μs 1.12 offset.OffestDatetimeArithmetic.time_add(<QuarterEnd: startingMonth=3>)
+ 15.23μs 16.99μs 1.12 offset.OffestDatetimeArithmetic.time_add_10(<BusinessDay>)
+ 13.70μs 15.24μs 1.11 offset.OffestDatetimeArithmetic.time_add_10(<YearEnd: month=12>)
+ 131.58μs 145.99μs 1.11 offset.OffestDatetimeArithmetic.time_subtract_10(<DateOffset: days=2, months=2>)
+ 11.07μs 12.26μs 1.11 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessQuarterBegin: startingMonth=3>)
+ 11.14μs 12.34μs 1.11 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<MonthBegin>)
+ 11.60μs 12.84μs 1.11 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessMonthEnd>)
+ 2.44ms 2.70ms 1.11 timeseries.DatetimeIndex.time_unique('tz_naive')
+ 15.79μs 17.46μs 1.11 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessMonthEnd>)
+ 21.47μs 23.70μs 1.10 offset.OffestDatetimeArithmetic.time_subtract(<Day>)
+ 10.57μs 11.64μs 1.10 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessMonthBegin>)
+ 2.98μs 3.28μs 1.10 timeseries.DatetimeIndex.time_get('repeated')
- 10.04μs 9.07μs 0.90 offset.OffestDatetimeArithmetic.time_apply(<BusinessMonthBegin>)
- 12.54μs 11.29μs 0.90 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<YearEnd: month=12>)
- 10.30μs 9.25μs 0.90 offset.OffestDatetimeArithmetic.time_apply(<QuarterBegin: startingMonth=3>)
- 16.50μs 14.67μs 0.89 offset.OffestDatetimeArithmetic.time_add_10(<QuarterEnd: startingMonth=3>)
- 8.11ms 6.98ms 0.86 timeseries.Factorize.time_factorize(None)
- 7.85ms 6.72ms 0.86 timeseries.Factorize.time_factorize('Asia/Tokyo')
- 20.75μs 13.16μs 0.63 offset.OffestDatetimeArithmetic.time_add(<MonthEnd>)
- 2.00ms 1.21ms 0.60 timeseries.ResampleDataFrame.time_method('mean')
- 3.99ms 2.34ms 0.59 timeseries.ToDatetimeCache.time_dup_string_dates(True)
- 19.81μs 11.38μs 0.57 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessYearBegin: month=1>)
- 30.71μs 12.91μs 0.42 offset.OffestDatetimeArithmetic.time_add(<BusinessQuarterEnd: startingMonth=3>)
asv continuous -E virtualenv -f 1.1 master HEAD -b offset -b timeseries
[...]
before after ratio
[d3f7d2a6] [fe7a7187]
+ 13.89μs 26.44μs 1.90 offset.OffestDatetimeArithmetic.time_add(<BusinessMonthEnd>)
+ 9.67μs 15.55μs 1.61 offset.OffestDatetimeArithmetic.time_apply(<DateOffset: days=2, months=2>)
+ 11.64μs 17.55μs 1.51 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<DateOffset: days=2, months=2>)
+ 12.28μs 17.62μs 1.43 offset.OffestDatetimeArithmetic.time_add(<DateOffset: days=2, months=2>)
+ 30.04μs 40.47μs 1.35 offset.OffestDatetimeArithmetic.time_subtract(<DateOffset: days=2, months=2>)
+ 14.31μs 17.84μs 1.25 offset.OffestDatetimeArithmetic.time_subtract(<YearBegin: month=1>)
+ 14.24μs 17.55μs 1.23 offset.OffestDatetimeArithmetic.time_add_10(<QuarterBegin: startingMonth=3>)
+ 16.80μs 20.62μs 1.23 offset.OffestDatetimeArithmetic.time_add_10(<SemiMonthEnd: day_of_month=15>)
+ 15.42μs 18.40μs 1.19 offset.OffestDatetimeArithmetic.time_subtract_10(<MonthBegin>)
+ 61.16μs 72.26μs 1.18 offset.OffestDatetimeArithmetic.time_add_10(<DateOffset: days=2, months=2>)
+ 14.30μs 16.82μs 1.18 offset.OffestDatetimeArithmetic.time_add_10(<BusinessQuarterEnd: startingMonth=3>)
+ 14.67μs 17.24μs 1.18 offset.OffestDatetimeArithmetic.time_add_10(<QuarterEnd: startingMonth=3>)
+ 9.54μs 11.21μs 1.17 offset.OffestDatetimeArithmetic.time_apply(<QuarterEnd: startingMonth=3>)
+ 17.11μs 20.00μs 1.17 offset.OffestDatetimeArithmetic.time_subtract_10(<QuarterBegin: startingMonth=3>)
+ 15.54μs 18.01μs 1.16 offset.OffestDatetimeArithmetic.time_apply(<CustomBusinessDay>)
+ 10.34μs 11.95μs 1.16 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<MonthEnd>)
+ 123.20μs 141.04μs 1.14 offset.OffestDatetimeArithmetic.time_subtract_10(<DateOffset: days=2, months=2>)
+ 13.93μs 15.85μs 1.14 offset.OffestDatetimeArithmetic.time_add_10(<YearBegin: month=1>)
+ 11.39μs 12.85μs 1.13 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<YearEnd: month=12>)
+ 16.79μs 18.84μs 1.12 offset.OffestDatetimeArithmetic.time_subtract_10(<QuarterEnd: startingMonth=3>)
+ 2.35ms 2.64ms 1.12 timeseries.DatetimeIndex.time_unique('tz_naive')
+ 4.16μs 4.67μs 1.12 timeseries.DatetimeIndex.time_get('dst')
+ 6.03μs 6.73μs 1.12 timeseries.DatetimeIndex.time_get('tz_aware')
+ 16.77μs 18.74μs 1.12 offset.OffestDatetimeArithmetic.time_add_10(<BusinessDay>)
+ 10.47μs 11.69μs 1.12 offset.OffestDatetimeArithmetic.time_apply(<BusinessDay>)
+ 12.36μs 13.67μs 1.11 offset.OffestDatetimeArithmetic.time_add(<YearBegin: month=1>)
+ 9.49μs 10.49μs 1.11 offset.OffestDatetimeArithmetic.time_apply(<BusinessQuarterEnd: startingMonth=3>)
+ 105.28μs 116.31μs 1.10 offset.OffestDatetimeArithmetic.time_apply(<CustomBusinessMonthBegin>)
+ 18.86μs 20.84μs 1.10 offset.OffestDatetimeArithmetic.time_subtract_10(<SemiMonthEnd: day_of_month=15>)
+ 14.58μs 16.07μs 1.10 offset.OffestDatetimeArithmetic.time_subtract(<MonthBegin>)
- 4.33ms 3.93ms 0.91 offset.OnOffset.time_on_offset(<CustomBusinessMonthBegin>)
- 2.30ms 2.05ms 0.89 timeseries.ResampleSeries.time_resample('datetime', '1D', 'ohlc')
- 148.28ms 131.47ms 0.89 timeseries.DatetimeIndex.time_to_pydatetime('tz_aware')
- 10.14μs 8.93μs 0.88 offset.OffestDatetimeArithmetic.time_apply(<MonthEnd>)
- 13.64μs 11.91μs 0.87 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<SemiMonthBegin: day_of_month=15>)
- 32.12μs 27.72μs 0.86 offset.OffestDatetimeArithmetic.time_add_10(<CustomBusinessDay>)
- 16.82μs 13.94μs 0.83 offset.OffestDatetimeArithmetic.time_add_10(<MonthEnd>)
- 3.27μs 2.59μs 0.79 timeseries.DatetimeIndex.time_get('repeated')
- 313.89μs 235.57μs 0.75 offset.OffsetDatetimeIndexArithmetic.time_add_offset(<QuarterEnd: startingMonth=3>)
- 23.48μs 15.20μs 0.65 offset.OffestDatetimeArithmetic.time_add(<BusinessYearEnd: month=12>)
- 3.96ms 2.55ms 0.64 timeseries.ResampleSeries.time_resample('period', '5min', 'ohlc')
- 23.80μs 13.81μs 0.58 offset.OffestDatetimeArithmetic.time_add(<BusinessDay>)
- 3.13ms 1.45ms 0.46 timeseries.ToDatetimeCache.time_dup_string_dates(False)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/19403 | 2018-01-26T00:07:48Z | 2018-02-02T11:32:49Z | 2018-02-02T11:32:49Z | 2018-02-04T16:42:54Z |
DOC: correct merge_ordered example | diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 8ee30bf72d313..99ea2c4fe4688 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -194,19 +194,17 @@ def merge_ordered(left, right, on=None,
5 e 3 b
>>> merge_ordered(A, B, fill_method='ffill', left_by='group')
- key lvalue group rvalue
- 0 a 1 a NaN
- 1 b 1 a 1
- 2 c 2 a 2
- 3 d 2 a 3
- 4 e 3 a 3
- 5 f 3 a 4
- 6 a 1 b NaN
- 7 b 1 b 1
- 8 c 2 b 2
- 9 d 2 b 3
- 10 e 3 b 3
- 11 f 3 b 4
+ group key lvalue rvalue
+ 0 a a 1 NaN
+ 1 a b 1 1.0
+ 2 a c 2 2.0
+ 3 a d 2 3.0
+ 4 a e 3 3.0
+ 5 b a 1 NaN
+ 6 b b 1 1.0
+ 7 b c 2 2.0
+ 8 b d 2 3.0
+ 9 b e 3 3.0
Returns
-------
diff --git a/pandas/tests/reshape/merge/test_merge_ordered.py b/pandas/tests/reshape/merge/test_merge_ordered.py
index a4c8793cc0ade..31c484a483d18 100644
--- a/pandas/tests/reshape/merge/test_merge_ordered.py
+++ b/pandas/tests/reshape/merge/test_merge_ordered.py
@@ -81,3 +81,21 @@ def test_empty_sequence_concat(self):
pd.concat([pd.DataFrame()])
pd.concat([None, pd.DataFrame()])
pd.concat([pd.DataFrame(), None])
+
+ def test_doc_example(self):
+ left = DataFrame({'key': ['a', 'c', 'e', 'a', 'c', 'e'],
+ 'lvalue': [1, 2, 3] * 2,
+ 'group': list('aaabbb')})
+
+ right = DataFrame({'key': ['b', 'c', 'd'],
+ 'rvalue': [1, 2, 3]})
+
+ result = merge_ordered(left, right, fill_method='ffill',
+ left_by='group')
+
+ expected = DataFrame({'group': list('aaaaabbbbb'),
+ 'key': ['a', 'b', 'c', 'd', 'e'] * 2,
+ 'lvalue': [1, 1, 2, 2, 3] * 2,
+ 'rvalue': [nan, 1, 2, 3, 3] * 2})
+
+ assert_frame_equal(result, expected)
| - update of incorrect documentation example for merge_ordered.
- adding a test corresponding to this example.
- [ ] closes #19393
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19401 | 2018-01-25T22:41:27Z | 2018-01-26T11:23:45Z | 2018-01-26T11:23:45Z | 2018-01-26T11:23:46Z |
DOC: changes to use code-block declaration | diff --git a/doc/source/developer.rst b/doc/source/developer.rst
index 5c3b114ce7299..0ef097da090f2 100644
--- a/doc/source/developer.rst
+++ b/doc/source/developer.rst
@@ -153,7 +153,7 @@ Libraries can use the decorators
pandas objects. All of these follow a similar convention: you decorate a class, providing the name of attribute to add. The
class's `__init__` method gets the object being decorated. For example:
-.. ipython:: python
+.. code-block:: python
@pd.api.extensions.register_dataframe_accessor("geo")
class GeoAccessor(object):
| - [ ] closes #19400
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19400 | 2018-01-25T21:57:20Z | 2018-01-26T11:18:38Z | 2018-01-26T11:18:37Z | 2018-01-26T11:18:38Z |
Fix invalid relativedelta_kwds | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 7509c502f27ed..473a4bb72e6d9 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -1245,7 +1245,6 @@ Offsets
- Bug in :class:`FY5253` where ``datetime`` addition and subtraction incremented incorrectly for dates on the year-end but not normalized to midnight (:issue:`18854`)
- Bug in :class:`FY5253` where date offsets could incorrectly raise an ``AssertionError`` in arithmetic operations (:issue:`14774`)
-
Numeric
^^^^^^^
- Bug in :class:`Series` constructor with an int or float list where specifying ``dtype=str``, ``dtype='str'`` or ``dtype='U'`` failed to convert the data elements to strings (:issue:`16605`)
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index b015495b095b6..59589478f48f5 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -402,7 +402,8 @@ Timezones
Offsets
^^^^^^^
--
+- Bug in :class:`FY5253` where date offsets could incorrectly raise an ``AssertionError`` in arithmetic operatons (:issue:`14774`)
+- Bug in :class:`DateOffset` where keyword arguments ``week`` and ``milliseconds`` were accepted and ignored. Passing these will now raise ``ValueError`` (:issue:`19398`)
-
Numeric
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 7881529f04ed3..3ba2270a851d5 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -254,10 +254,10 @@ def _validate_business_time(t_input):
relativedelta_kwds = set([
'years', 'months', 'weeks', 'days',
- 'year', 'month', 'week', 'day', 'weekday',
+ 'year', 'month', 'day', 'weekday',
'hour', 'minute', 'second', 'microsecond',
'nanosecond', 'nanoseconds',
- 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'])
+ 'hours', 'minutes', 'seconds', 'microseconds'])
def _determine_offset(kwds):
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index db69bfadfcf49..35ee0d37e2b1a 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -3085,6 +3085,13 @@ def test_valid_month_attributes(kwd, month_classes):
cls(**{kwd: 3})
+@pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds)))
+def test_valid_relativedelta_kwargs(kwd):
+ # Check that all the arguments specified in liboffsets.relativedelta_kwds
+ # are in fact valid relativedelta keyword args
+ DateOffset(**{kwd: 1})
+
+
@pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds)))
def test_valid_tick_attributes(kwd, tick_classes):
# GH#18226
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 5d076bf33a8ac..dd4356aac1cd5 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -158,6 +158,54 @@ def __add__(date):
date + BDay(0) == BDay.rollforward(date)
Since 0 is a bit weird, we suggest avoiding its use.
+
+ Parameters
+ ----------
+ n : int, default 1
+ The number of time periods the offset represents.
+ normalize : bool, default False
+ Whether to round the result of a DateOffset addition down to the
+ previous midnight.
+ **kwds
+ Temporal parameter that add to or replace the offset value.
+
+ Parameters that **add** to the offset (like Timedelta):
+
+ - years
+ - months
+ - weeks
+ - days
+ - hours
+ - minutes
+ - seconds
+ - microseconds
+ - nanoseconds
+
+ Parameters that **replace** the offset value:
+
+ - year
+ - month
+ - day
+ - weekday
+ - hour
+ - minute
+ - second
+ - microsecond
+ - nanosecond
+
+ See Also
+ --------
+ dateutil.relativedelta.relativedelta
+
+ Examples
+ --------
+ >>> ts = pd.Timestamp('2017-01-01 09:10:11')
+ >>> ts + DateOffset(months=3)
+ Timestamp('2017-04-01 09:10:11')
+
+ >>> ts = pd.Timestamp('2017-01-01 09:10:11')
+ >>> ts + DateOffset(month=3)
+ Timestamp('2017-03-01 09:10:11')
"""
_params = cache_readonly(BaseOffset._params.fget)
_use_relativedelta = False
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19398 | 2018-01-25T19:16:02Z | 2018-07-20T13:29:26Z | 2018-07-20T13:29:26Z | 2020-04-05T17:40:50Z |
Remove unused Index attributes | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 74c6abeb0ad12..626f3dc86556a 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -141,12 +141,10 @@ class Index(IndexOpsMixin, PandasObject):
_join_precedence = 1
# Cython methods
- _arrmap = libalgos.arrmap_object
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
- _box_scalars = False
_typ = 'index'
_data = None
@@ -155,9 +153,6 @@ class Index(IndexOpsMixin, PandasObject):
asi8 = None
_comparables = ['name']
_attributes = ['name']
- _allow_index_ops = True
- _allow_datetime_index_ops = False
- _allow_period_index_ops = False
_is_numeric_dtype = False
_can_hold_na = True
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index afc86a51c02b4..8dd41c022d163 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -282,7 +282,6 @@ def _join_i8_wrapper(joinf, **kwargs):
_left_indexer = _join_i8_wrapper(libjoin.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
libjoin.left_join_indexer_unique_int64, with_indexers=False)
- _arrmap = None
@classmethod
def _add_comparison_methods(cls):
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 232770e582763..3bf783b5a2faa 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -207,7 +207,6 @@ class IntervalIndex(IntervalMixin, Index):
_typ = 'intervalindex'
_comparables = ['name']
_attributes = ['name', 'closed']
- _allow_index_ops = True
# we would like our indexing holder to defer to us
_defer_to_indexing = True
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 5e6ebb7588ab9..b02aee0495d8c 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -1,6 +1,6 @@
import numpy as np
from pandas._libs import (index as libindex,
- algos as libalgos, join as libjoin)
+ join as libjoin)
from pandas.core.dtypes.common import (
is_dtype_equal,
pandas_dtype,
@@ -158,7 +158,6 @@ class Int64Index(NumericIndex):
__doc__ = _num_index_shared_docs['class_descr'] % _int64_descr_args
_typ = 'int64index'
- _arrmap = libalgos.arrmap_int64
_left_indexer_unique = libjoin.left_join_indexer_unique_int64
_left_indexer = libjoin.left_join_indexer_int64
_inner_indexer = libjoin.inner_join_indexer_int64
@@ -217,7 +216,6 @@ class UInt64Index(NumericIndex):
__doc__ = _num_index_shared_docs['class_descr'] % _uint64_descr_args
_typ = 'uint64index'
- _arrmap = libalgos.arrmap_uint64
_left_indexer_unique = libjoin.left_join_indexer_unique_uint64
_left_indexer = libjoin.left_join_indexer_uint64
_inner_indexer = libjoin.inner_join_indexer_uint64
@@ -296,7 +294,6 @@ class Float64Index(NumericIndex):
_typ = 'float64index'
_engine_type = libindex.Float64Engine
- _arrmap = libalgos.arrmap_float64
_left_indexer_unique = libjoin.left_join_indexer_unique_float64
_left_indexer = libjoin.left_join_indexer_float64
_inner_indexer = libjoin.inner_join_indexer_float64
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 8b35b1a231551..1f8542ed5ee60 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -204,7 +204,6 @@ class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):
DatetimeIndex : Index with datetime64 data
TimedeltaIndex : Index of timedelta64 data
"""
- _box_scalars = True
_typ = 'periodindex'
_attributes = ['name', 'freq']
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index b88ee88210cfe..4b543262fc485 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -170,7 +170,6 @@ def _join_i8_wrapper(joinf, **kwargs):
_left_indexer = _join_i8_wrapper(libjoin.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
libjoin.left_join_indexer_unique_int64, with_indexers=False)
- _arrmap = None
# define my properties & methods for delegation
_other_ops = []
diff --git a/pandas/core/series.py b/pandas/core/series.py
index a14eb69d86377..78b4c3a70a519 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -144,7 +144,6 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
_deprecations = generic.NDFrame._deprecations | frozenset(
['asobject', 'sortlevel', 'reshape', 'get_value', 'set_value',
'from_csv', 'valid'])
- _allow_index_ops = True
def __init__(self, data=None, index=None, dtype=None, name=None,
copy=False, fastpath=False):
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index c468908db5449..df2547fc7b0da 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -265,8 +265,8 @@ class TestIndexOps(Ops):
def setup_method(self, method):
super(TestIndexOps, self).setup_method(method)
- self.is_valid_objs = [o for o in self.objs if o._allow_index_ops]
- self.not_valid_objs = [o for o in self.objs if not o._allow_index_ops]
+ self.is_valid_objs = self.objs
+ self.not_valid_objs = []
def test_none_comparison(self):
| https://api.github.com/repos/pandas-dev/pandas/pulls/19397 | 2018-01-25T19:11:18Z | 2018-01-27T01:12:24Z | 2018-01-27T01:12:24Z | 2018-01-31T06:49:27Z | |
Centralize ops kwarg specification | diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 3db2dd849ccee..ba8a15b60ba56 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -39,7 +39,8 @@
ABCSeries,
ABCDataFrame,
ABCIndex,
- ABCPeriodIndex)
+ ABCPeriodIndex,
+ ABCSparseSeries)
def _gen_eval_kwargs(name):
@@ -109,6 +110,31 @@ def _gen_fill_zeros(name):
return fill_value
+def _get_frame_op_default_axis(name):
+ """
+ Only DataFrame cares about default_axis, specifically:
+ special methods have default_axis=None and flex methods
+ have default_axis='columns'.
+
+ Parameters
+ ----------
+ name : str
+
+ Returns
+ -------
+ default_axis: str or None
+ """
+ if name.replace('__r', '__') in ['__and__', '__or__', '__xor__']:
+ # bool methods
+ return 'columns'
+ elif name.startswith('__'):
+ # __add__, __mul__, ...
+ return None
+ else:
+ # add, mul, ...
+ return 'columns'
+
+
# -----------------------------------------------------------------------------
# Docstring Generation and Templates
@@ -281,17 +307,17 @@ def _gen_fill_zeros(name):
_agg_doc_PANEL = """
-Wrapper method for {wrp_method}
+Wrapper method for {op_name}
Parameters
----------
-other : {construct} or {cls_name}
-axis : {{{axis_order}}}
+other : DataFrame or Panel
+axis : {{items, major_axis, minor_axis}}
Axis to broadcast over
Returns
-------
-{cls_name}
+Panel
"""
@@ -337,14 +363,18 @@ def _make_flex_doc(op_name, typ):
# methods
-def _create_methods(arith_method, comp_method, bool_method,
- use_numexpr, special=False, default_axis='columns',
- have_divmod=False):
+def _create_methods(cls, arith_method, comp_method, bool_method,
+ special=False):
# creates actual methods based upon arithmetic, comp and bool method
# constructors.
- # NOTE: Only frame cares about default_axis, specifically: special methods
- # have default axis None, whereas flex methods have default axis 'columns'
+ # numexpr is available for non-sparse classes
+ subtyp = getattr(cls, '_subtyp', '')
+ use_numexpr = 'sparse' not in subtyp
+
+ have_divmod = issubclass(cls, ABCSeries)
+ # divmod is available for Series and SparseSeries
+
# if we're not using numexpr, then don't pass a str_rep
if use_numexpr:
op = lambda x: x
@@ -360,44 +390,28 @@ def names(x):
else:
names = lambda x: x
- # Inframe, all special methods have default_axis=None, flex methods have
- # default_axis set to the default (columns)
# yapf: disable
new_methods = dict(
- add=arith_method(operator.add, names('add'), op('+'),
- default_axis=default_axis),
- radd=arith_method(lambda x, y: y + x, names('radd'), op('+'),
- default_axis=default_axis),
- sub=arith_method(operator.sub, names('sub'), op('-'),
- default_axis=default_axis),
- mul=arith_method(operator.mul, names('mul'), op('*'),
- default_axis=default_axis),
- truediv=arith_method(operator.truediv, names('truediv'), op('/'),
- default_axis=default_axis),
- floordiv=arith_method(operator.floordiv, names('floordiv'), op('//'),
- default_axis=default_axis),
+ add=arith_method(operator.add, names('add'), op('+')),
+ radd=arith_method(lambda x, y: y + x, names('radd'), op('+')),
+ sub=arith_method(operator.sub, names('sub'), op('-')),
+ mul=arith_method(operator.mul, names('mul'), op('*')),
+ truediv=arith_method(operator.truediv, names('truediv'), op('/')),
+ floordiv=arith_method(operator.floordiv, names('floordiv'), op('//')),
# Causes a floating point exception in the tests when numexpr enabled,
# so for now no speedup
- mod=arith_method(operator.mod, names('mod'), None,
- default_axis=default_axis),
- pow=arith_method(operator.pow, names('pow'), op('**'),
- default_axis=default_axis),
+ mod=arith_method(operator.mod, names('mod'), None),
+ pow=arith_method(operator.pow, names('pow'), op('**')),
# not entirely sure why this is necessary, but previously was included
# so it's here to maintain compatibility
- rmul=arith_method(operator.mul, names('rmul'), op('*'),
- default_axis=default_axis),
- rsub=arith_method(lambda x, y: y - x, names('rsub'), op('-'),
- default_axis=default_axis),
+ rmul=arith_method(operator.mul, names('rmul'), op('*')),
+ rsub=arith_method(lambda x, y: y - x, names('rsub'), op('-')),
rtruediv=arith_method(lambda x, y: operator.truediv(y, x),
- names('rtruediv'), op('/'),
- default_axis=default_axis),
+ names('rtruediv'), op('/')),
rfloordiv=arith_method(lambda x, y: operator.floordiv(y, x),
- names('rfloordiv'), op('//'),
- default_axis=default_axis),
- rpow=arith_method(lambda x, y: y**x, names('rpow'), op('**'),
- default_axis=default_axis),
- rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%'),
- default_axis=default_axis))
+ names('rfloordiv'), op('//')),
+ rpow=arith_method(lambda x, y: y**x, names('rpow'), op('**')),
+ rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%')))
# yapf: enable
new_methods['div'] = new_methods['truediv']
new_methods['rdiv'] = new_methods['rtruediv']
@@ -425,10 +439,7 @@ def names(x):
names('rxor'), op('^'))))
if have_divmod:
# divmod doesn't have an op that is supported by numexpr
- new_methods['divmod'] = arith_method(divmod,
- names('divmod'),
- None,
- default_axis=default_axis)
+ new_methods['divmod'] = arith_method(divmod, names('divmod'), None)
new_methods = {names(k): v for k, v in new_methods.items()}
return new_methods
@@ -444,8 +455,7 @@ def add_methods(cls, new_methods, force):
# Arithmetic
def add_special_arithmetic_methods(cls, arith_method=None,
comp_method=None, bool_method=None,
- use_numexpr=True, force=False,
- have_divmod=False):
+ force=False):
"""
Adds the full suite of special arithmetic methods (``__add__``,
``__sub__``, etc.) to the class.
@@ -454,27 +464,17 @@ def add_special_arithmetic_methods(cls, arith_method=None,
----------
arith_method : function (optional)
factory for special arithmetic methods, with op string:
- f(op, name, str_rep, default_axis=None)
+ f(op, name, str_rep)
comp_method : function (optional)
factory for rich comparison - signature: f(op, name, str_rep)
bool_method : function (optional)
factory for boolean methods - signature: f(op, name, str_rep)
- use_numexpr : bool, default True
- whether to accelerate with numexpr, defaults to True
force : bool, default False
if False, checks whether function is defined **on ``cls.__dict__``**
before defining if True, always defines functions on class base
- have_divmod : bool, (optional)
- should a divmod method be added? this method is special because it
- returns a tuple of cls instead of a single element of type cls
"""
-
- # in frame, special methods have default_axis = None, comp methods use
- # 'columns'
-
- new_methods = _create_methods(arith_method, comp_method,
- bool_method, use_numexpr, default_axis=None,
- special=True, have_divmod=have_divmod)
+ new_methods = _create_methods(cls, arith_method, comp_method, bool_method,
+ special=True)
# inplace operators (I feel like these should get passed an `inplace=True`
# or just be removed
@@ -517,7 +517,7 @@ def f(self, other):
def add_flex_arithmetic_methods(cls, flex_arith_method,
flex_comp_method=None, flex_bool_method=None,
- use_numexpr=True, force=False):
+ force=False):
"""
Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)
to the class.
@@ -525,20 +525,16 @@ def add_flex_arithmetic_methods(cls, flex_arith_method,
Parameters
----------
flex_arith_method : function
- factory for special arithmetic methods, with op string:
- f(op, name, str_rep, default_axis=None)
+ factory for flex arithmetic methods, with op string:
+ f(op, name, str_rep)
flex_comp_method : function, optional,
factory for rich comparison - signature: f(op, name, str_rep)
- use_numexpr : bool, default True
- whether to accelerate with numexpr, defaults to True
force : bool, default False
if False, checks whether function is defined **on ``cls.__dict__``**
before defining if True, always defines functions on class base
"""
- # in frame, default axis is 'columns', doesn't matter for series and panel
- new_methods = _create_methods(flex_arith_method,
+ new_methods = _create_methods(cls, flex_arith_method,
flex_comp_method, flex_bool_method,
- use_numexpr, default_axis='columns',
special=False)
new_methods.update(dict(multiply=new_methods['mul'],
subtract=new_methods['sub'],
@@ -597,7 +593,7 @@ def _construct_divmod_result(left, result, index, name, dtype):
)
-def _arith_method_SERIES(op, name, str_rep, default_axis=None):
+def _arith_method_SERIES(op, name, str_rep):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
@@ -637,15 +633,9 @@ def safe_na_op(lvalues, rvalues):
with np.errstate(all='ignore'):
return na_op(lvalues, rvalues)
except Exception:
- if isinstance(rvalues, ABCSeries):
- if is_object_dtype(rvalues):
- # if dtype is object, try elementwise op
- return libalgos.arrmap_object(rvalues,
- lambda x: op(lvalues, x))
- else:
- if is_object_dtype(lvalues):
- return libalgos.arrmap_object(lvalues,
- lambda x: op(x, rvalues))
+ if is_object_dtype(lvalues):
+ return libalgos.arrmap_object(lvalues,
+ lambda x: op(x, rvalues))
raise
def wrapper(left, right, name=name, na_op=na_op):
@@ -671,7 +661,7 @@ def wrapper(left, right, name=name, na_op=na_op):
lvalues = left.values
rvalues = right
if isinstance(rvalues, ABCSeries):
- rvalues = getattr(rvalues, 'values', rvalues)
+ rvalues = rvalues.values
result = safe_na_op(lvalues, rvalues)
return construct_result(left, result,
@@ -933,7 +923,7 @@ def wrapper(self, other):
return wrapper
-def _flex_method_SERIES(op, name, str_rep, default_axis=None):
+def _flex_method_SERIES(op, name, str_rep):
doc = _make_flex_doc(name, 'series')
@Appender(doc)
@@ -964,8 +954,7 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
series_special_funcs = dict(arith_method=_arith_method_SERIES,
comp_method=_comp_method_SERIES,
- bool_method=_bool_method_SERIES,
- have_divmod=True)
+ bool_method=_bool_method_SERIES)
# -----------------------------------------------------------------------------
@@ -1015,9 +1004,10 @@ def to_series(right):
return right
-def _arith_method_FRAME(op, name, str_rep=None, default_axis='columns'):
+def _arith_method_FRAME(op, name, str_rep=None):
eval_kwargs = _gen_eval_kwargs(name)
fill_zeros = _gen_fill_zeros(name)
+ default_axis = _get_frame_op_default_axis(name)
def na_op(x, y):
import pandas.core.computation.expressions as expressions
@@ -1088,7 +1078,8 @@ def f(self, other, axis=default_axis, level=None, fill_value=None):
return f
-def _flex_comp_method_FRAME(op, name, str_rep=None, default_axis='columns'):
+def _flex_comp_method_FRAME(op, name, str_rep=None):
+ default_axis = _get_frame_op_default_axis(name)
def na_op(x, y):
try:
@@ -1167,8 +1158,7 @@ def f(self, other):
# -----------------------------------------------------------------------------
# Panel
-def _arith_method_PANEL(op, name, str_rep=None, default_axis=None):
-
+def _arith_method_PANEL(op, name, str_rep=None):
# work only for scalars
def f(self, other):
if not is_scalar(other):
@@ -1228,6 +1218,122 @@ def f(self, other, axis=None):
return f
+def _flex_method_PANEL(op, name, str_rep=None):
+ eval_kwargs = _gen_eval_kwargs(name)
+ fill_zeros = _gen_fill_zeros(name)
+
+ def na_op(x, y):
+ import pandas.core.computation.expressions as expressions
+
+ try:
+ result = expressions.evaluate(op, str_rep, x, y,
+ errors='raise',
+ **eval_kwargs)
+ except TypeError:
+ result = op(x, y)
+
+ # handles discrepancy between numpy and numexpr on division/mod
+ # by 0 though, given that these are generally (always?)
+ # non-scalars, I'm not sure whether it's worth it at the moment
+ result = missing.fill_zeros(result, x, y, name, fill_zeros)
+ return result
+
+ if name in _op_descriptions:
+ doc = _make_flex_doc(name, 'panel')
+ else:
+ # doc strings substitors
+ doc = _agg_doc_PANEL.format(op_name=name)
+
+ @Appender(doc)
+ def f(self, other, axis=0):
+ return self._combine(other, na_op, axis=axis)
+
+ f.__name__ = name
+ return f
+
+
panel_special_funcs = dict(arith_method=_arith_method_PANEL,
comp_method=_comp_method_PANEL,
bool_method=_arith_method_PANEL)
+
+
+# -----------------------------------------------------------------------------
+# Sparse
+
+
+def _arith_method_SPARSE_SERIES(op, name, str_rep=None):
+ """
+ Wrapper function for Series arithmetic operations, to avoid
+ code duplication.
+
+ str_rep is not used, but is present for compatibility.
+ """
+
+ def wrapper(self, other):
+ if isinstance(other, ABCDataFrame):
+ return NotImplemented
+ elif isinstance(other, ABCSeries):
+ if not isinstance(other, ABCSparseSeries):
+ other = other.to_sparse(fill_value=self.fill_value)
+ return _sparse_series_op(self, other, op, name)
+ elif is_scalar(other):
+ with np.errstate(all='ignore'):
+ new_values = op(self.values, other)
+ return self._constructor(new_values,
+ index=self.index,
+ name=self.name)
+ else: # pragma: no cover
+ raise TypeError('operation with {other} not supported'
+ .format(other=type(other)))
+
+ wrapper.__name__ = name
+ if name.startswith("__"):
+ # strip special method names, e.g. `__add__` needs to be `add` when
+ # passed to _sparse_series_op
+ name = name[2:-2]
+ return wrapper
+
+
+def _sparse_series_op(left, right, op, name):
+ left, right = left.align(right, join='outer', copy=False)
+ new_index = left.index
+ new_name = com._maybe_match_name(left, right)
+
+ from pandas.core.sparse.array import _sparse_array_op
+ result = _sparse_array_op(left.values, right.values, op, name,
+ series=True)
+ return left._constructor(result, index=new_index, name=new_name)
+
+
+def _arith_method_SPARSE_ARRAY(op, name, str_rep=None):
+ """
+ Wrapper function for Series arithmetic operations, to avoid
+ code duplication.
+ """
+
+ def wrapper(self, other):
+ from pandas.core.sparse.array import (
+ SparseArray, _sparse_array_op, _wrap_result, _get_fill)
+ if isinstance(other, np.ndarray):
+ if len(self) != len(other):
+ raise AssertionError("length mismatch: {self} vs. {other}"
+ .format(self=len(self), other=len(other)))
+ if not isinstance(other, SparseArray):
+ dtype = getattr(other, 'dtype', None)
+ other = SparseArray(other, fill_value=self.fill_value,
+ dtype=dtype)
+ return _sparse_array_op(self, other, op, name)
+ elif is_scalar(other):
+ with np.errstate(all='ignore'):
+ fill = op(_get_fill(self), np.asarray(other))
+ result = op(self.sp_values, other)
+
+ return _wrap_result(name, result, self.sp_index, fill)
+ else: # pragma: no cover
+ raise TypeError('operation with {other} not supported'
+ .format(other=type(other)))
+
+ if name.startswith("__"):
+ name = name[2:-2]
+ wrapper.__name__ = name
+ return wrapper
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index afdd9bae3006f..2cb80e938afb9 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -16,7 +16,6 @@
from pandas.core.dtypes.missing import notna
import pandas.core.ops as ops
-import pandas.core.missing as missing
import pandas.core.common as com
from pandas import compat
from pandas.compat import (map, zip, range, u, OrderedDict)
@@ -1521,52 +1520,6 @@ def _extract_axis(self, data, axis=0, intersect=False):
return _ensure_index(index)
- @classmethod
- def _add_aggregate_operations(cls, use_numexpr=True):
- """ add the operations to the cls; evaluate the doc strings again """
-
- def _panel_arith_method(op, name, str_rep=None, default_axis=None):
-
- eval_kwargs = ops._gen_eval_kwargs(name)
- fill_zeros = ops._gen_fill_zeros(name)
-
- def na_op(x, y):
- import pandas.core.computation.expressions as expressions
-
- try:
- result = expressions.evaluate(op, str_rep, x, y,
- errors='raise',
- **eval_kwargs)
- except TypeError:
- result = op(x, y)
-
- # handles discrepancy between numpy and numexpr on division/mod
- # by 0 though, given that these are generally (always?)
- # non-scalars, I'm not sure whether it's worth it at the moment
- result = missing.fill_zeros(result, x, y, name, fill_zeros)
- return result
-
- if name in ops._op_descriptions:
- doc = ops._make_flex_doc(name, 'panel')
- else:
- # doc strings substitors
- doc = ops._agg_doc_PANEL.format(
- construct=cls._constructor_sliced.__name__,
- cls_name=cls.__name__, wrp_method=name,
- axis_order=', '.join(cls._AXIS_ORDERS))
-
- @Appender(doc)
- def f(self, other, axis=0):
- return self._combine(other, na_op, axis=axis)
-
- f.__name__ = name
- return f
-
- # add `div`, `mul`, `pow`, etc..
- ops.add_flex_arithmetic_methods(
- cls, _panel_arith_method, use_numexpr=use_numexpr,
- flex_comp_method=ops._comp_method_PANEL)
-
Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], info_axis=0,
stat_axis=1, aliases={'major': 'major_axis',
@@ -1575,7 +1528,8 @@ def f(self, other, axis=0):
'minor_axis': 'columns'})
ops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)
-Panel._add_aggregate_operations()
+ops.add_flex_arithmetic_methods(Panel, ops._flex_method_PANEL,
+ flex_comp_method=ops._comp_method_PANEL)
Panel._add_numeric_operations()
diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py
index 059e399593971..fa07400a0706e 100644
--- a/pandas/core/sparse/array.py
+++ b/pandas/core/sparse/array.py
@@ -14,8 +14,7 @@
from pandas.compat import range
from pandas.compat.numpy import function as nv
-from pandas.core.dtypes.generic import (
- ABCSparseArray, ABCSparseSeries)
+from pandas.core.dtypes.generic import ABCSparseSeries
from pandas.core.dtypes.common import (
_ensure_platform_int,
is_float, is_integer,
@@ -43,38 +42,6 @@
_sparray_doc_kwargs = dict(klass='SparseArray')
-def _arith_method_SPARSE_ARRAY(op, name, str_rep=None, default_axis=None):
- """
- Wrapper function for Series arithmetic operations, to avoid
- code duplication.
- """
-
- def wrapper(self, other):
- if isinstance(other, np.ndarray):
- if len(self) != len(other):
- raise AssertionError("length mismatch: {self} vs. {other}"
- .format(self=len(self), other=len(other)))
- if not isinstance(other, ABCSparseArray):
- dtype = getattr(other, 'dtype', None)
- other = SparseArray(other, fill_value=self.fill_value,
- dtype=dtype)
- return _sparse_array_op(self, other, op, name)
- elif is_scalar(other):
- with np.errstate(all='ignore'):
- fill = op(_get_fill(self), np.asarray(other))
- result = op(self.sp_values, other)
-
- return _wrap_result(name, result, self.sp_index, fill)
- else: # pragma: no cover
- raise TypeError('operation with {other} not supported'
- .format(other=type(other)))
-
- if name.startswith("__"):
- name = name[2:-2]
- wrapper.__name__ = name
- return wrapper
-
-
def _get_fill(arr):
# coerce fill_value to arr dtype if possible
# int64 SparseArray can have NaN as fill_value if there is no missing
@@ -864,7 +831,6 @@ def _make_index(length, indices, kind):
ops.add_special_arithmetic_methods(SparseArray,
- arith_method=_arith_method_SPARSE_ARRAY,
- comp_method=_arith_method_SPARSE_ARRAY,
- bool_method=_arith_method_SPARSE_ARRAY,
- use_numexpr=False)
+ arith_method=ops._arith_method_SPARSE_ARRAY,
+ comp_method=ops._arith_method_SPARSE_ARRAY,
+ bool_method=ops._arith_method_SPARSE_ARRAY)
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py
index c7f5b0ba67c19..cc08ccf77ad26 100644
--- a/pandas/core/sparse/frame.py
+++ b/pandas/core/sparse/frame.py
@@ -981,7 +981,5 @@ def homogenize(series_dict):
# use unaccelerated ops for sparse objects
-ops.add_flex_arithmetic_methods(SparseDataFrame, use_numexpr=False,
- **ops.frame_flex_funcs)
-ops.add_special_arithmetic_methods(SparseDataFrame, use_numexpr=False,
- **ops.frame_special_funcs)
+ops.add_flex_arithmetic_methods(SparseDataFrame, **ops.frame_flex_funcs)
+ops.add_special_arithmetic_methods(SparseDataFrame, **ops.frame_special_funcs)
diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py
index 2c8fd20f8eab1..4e207f9d1838c 100644
--- a/pandas/core/sparse/series.py
+++ b/pandas/core/sparse/series.py
@@ -9,12 +9,10 @@
import warnings
from pandas.core.dtypes.missing import isna, notna
-from pandas.core.dtypes.common import is_scalar
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, _ensure_index, InvalidIndexError
from pandas.core.series import Series
-from pandas.core.frame import DataFrame
from pandas.core.internals import SingleBlockManager
from pandas.core import generic
import pandas.core.common as com
@@ -23,7 +21,7 @@
from pandas.util._decorators import Appender
from pandas.core.sparse.array import (
- make_sparse, _sparse_array_op, SparseArray,
+ make_sparse, SparseArray,
_make_index)
from pandas._libs.sparse import BlockIndex, IntIndex
import pandas._libs.sparse as splib
@@ -37,53 +35,6 @@
axes_single_arg="{0, 'index'}",
optional_labels='', optional_axis='')
-# -----------------------------------------------------------------------------
-# Wrapper function for Series arithmetic methods
-
-
-def _arith_method_SPARSE_SERIES(op, name, str_rep=None, default_axis=None):
- """
- Wrapper function for Series arithmetic operations, to avoid
- code duplication.
-
- str_rep and default_axis are not used, but are
- present for compatibility.
- """
-
- def wrapper(self, other):
- if isinstance(other, Series):
- if not isinstance(other, SparseSeries):
- other = other.to_sparse(fill_value=self.fill_value)
- return _sparse_series_op(self, other, op, name)
- elif isinstance(other, DataFrame):
- return NotImplemented
- elif is_scalar(other):
- with np.errstate(all='ignore'):
- new_values = op(self.values, other)
- return self._constructor(new_values,
- index=self.index,
- name=self.name)
- else: # pragma: no cover
- raise TypeError('operation with {other} not supported'
- .format(other=type(other)))
-
- wrapper.__name__ = name
- if name.startswith("__"):
- # strip special method names, e.g. `__add__` needs to be `add` when
- # passed to _sparse_series_op
- name = name[2:-2]
- return wrapper
-
-
-def _sparse_series_op(left, right, op, name):
- left, right = left.align(right, join='outer', copy=False)
- new_index = left.index
- new_name = com._maybe_match_name(left, right)
-
- result = _sparse_array_op(left.values, right.values, op, name,
- series=True)
- return left._constructor(result, index=new_index, name=new_name)
-
class SparseSeries(Series):
"""Data structure for labeled, sparse floating point data
@@ -861,14 +812,11 @@ def from_coo(cls, A, dense_index=False):
# overwrite series methods with unaccelerated versions
-ops.add_special_arithmetic_methods(SparseSeries, use_numexpr=False,
- **ops.series_special_funcs)
-ops.add_flex_arithmetic_methods(SparseSeries, use_numexpr=False,
- **ops.series_flex_funcs)
+ops.add_special_arithmetic_methods(SparseSeries, **ops.series_special_funcs)
+ops.add_flex_arithmetic_methods(SparseSeries, **ops.series_flex_funcs)
# overwrite basic arithmetic to use SparseSeries version
# force methods to overwrite previous definitions.
ops.add_special_arithmetic_methods(SparseSeries,
- arith_method=_arith_method_SPARSE_SERIES,
- comp_method=_arith_method_SPARSE_SERIES,
- bool_method=None, use_numexpr=False,
- force=True)
+ ops._arith_method_SPARSE_SERIES,
+ comp_method=ops._arith_method_SPARSE_SERIES,
+ bool_method=None, force=True)
| Follow-up to #19346, making **kwargs more explicit, documenting how they are chosen, moving one method from `Panel` that belongs in `core.ops`.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19396 | 2018-01-25T17:58:22Z | 2018-01-27T01:20:21Z | 2018-01-27T01:20:21Z | 2018-01-31T06:49:27Z |
Separate non-scalar tests from test_timestamps | diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index 65dd166e1f6a8..e0fc6c470fe57 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -428,6 +428,16 @@ def test_applymap(self):
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
+ def test_applymap_box_timestamps(self):
+ # #2689, #2627
+ ser = pd.Series(date_range('1/1/2000', periods=10))
+
+ def func(x):
+ return (x.hour, x.day, x.month)
+
+ # it works!
+ pd.DataFrame(ser).applymap(func)
+
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index a91dbd905e12c..fb8dd1a43aa7f 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -144,6 +144,25 @@ def test_numpy_minmax(self):
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, dr, out=0)
+ def test_round_daily(self):
+ dti = pd.date_range('20130101 09:10:11', periods=5)
+ result = dti.round('D')
+ expected = pd.date_range('20130101', periods=5)
+ tm.assert_index_equal(result, expected)
+
+ dti = dti.tz_localize('UTC').tz_convert('US/Eastern')
+ result = dti.round('D')
+ expected = pd.date_range('20130101',
+ periods=5).tz_localize('US/Eastern')
+ tm.assert_index_equal(result, expected)
+
+ result = dti.round('s')
+ tm.assert_index_equal(result, dti)
+
+ # invalid
+ for freq in ['Y', 'M', 'foobar']:
+ pytest.raises(ValueError, lambda: dti.round(freq))
+
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index fd0c2b9d0218c..151a0750b7f6e 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -868,6 +868,13 @@ def test_fallback_success(self):
banklist_data = os.path.join(DATA_PATH, 'banklist.html')
self.read_html(banklist_data, '.*Water.*', flavor=['lxml', 'html5lib'])
+ def test_to_html_timestamp(self):
+ rng = date_range('2000-01-01', periods=10)
+ df = DataFrame(np.random.randn(10, 4), index=rng)
+
+ result = df.to_html()
+ assert '2000-01-01' in result
+
def test_parse_dates_list(self):
df = DataFrame({'date': date_range('1/1/2001', periods=10)})
expected = df.to_html()
diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py
index a3e9a0442ea0b..2b72eef2c6712 100644
--- a/pandas/tests/scalar/test_timestamp.py
+++ b/pandas/tests/scalar/test_timestamp.py
@@ -19,17 +19,13 @@
from pandas.tseries import offsets
-from pandas._libs.tslibs import conversion, period
+from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas._libs.tslibs.frequencies import _INVALID_FREQ_ERROR
from pandas.compat import long, PY3
-from pandas.util.testing import assert_series_equal
from pandas.compat.numpy import np_datetime64_compat
-from pandas import (Timestamp, date_range, Period, Timedelta, compat,
- Series, NaT, DataFrame)
-from pandas.tseries.frequencies import (RESO_DAY, RESO_HR, RESO_MIN, RESO_US,
- RESO_MS, RESO_SEC)
+from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampArithmetic(object):
@@ -54,6 +50,50 @@ def test_delta_preserve_nanos(self):
result = val + timedelta(1)
assert result.nanosecond == val.nanosecond
+ def test_timestamp_sub_datetime(self):
+ dt = datetime(2013, 10, 12)
+ ts = Timestamp(datetime(2013, 10, 13))
+ assert (ts - dt).days == 1
+ assert (dt - ts).days == -1
+
+ def test_addition_subtraction_types(self):
+ # Assert on the types resulting from Timestamp +/- various date/time
+ # objects
+ dt = datetime(2014, 3, 4)
+ td = timedelta(seconds=1)
+ # build a timestamp with a frequency, since then it supports
+ # addition/subtraction of integers
+ ts = Timestamp(dt, freq='D')
+
+ assert type(ts + 1) == Timestamp
+ assert type(ts - 1) == Timestamp
+
+ # Timestamp + datetime not supported, though subtraction is supported
+ # and yields timedelta more tests in tseries/base/tests/test_base.py
+ assert type(ts - dt) == Timedelta
+ assert type(ts + td) == Timestamp
+ assert type(ts - td) == Timestamp
+
+ # Timestamp +/- datetime64 not supported, so not tested (could possibly
+ # assert error raised?)
+ td64 = np.timedelta64(1, 'D')
+ assert type(ts + td64) == Timestamp
+ assert type(ts - td64) == Timestamp
+
+ def test_addition_subtraction_preserve_frequency(self):
+ ts = Timestamp('2014-03-05', freq='D')
+ td = timedelta(days=1)
+ original_freq = ts.freq
+
+ assert (ts + 1).freq == original_freq
+ assert (ts - 1).freq == original_freq
+ assert (ts + td).freq == original_freq
+ assert (ts - td).freq == original_freq
+
+ td64 = np.timedelta64(1, 'D')
+ assert (ts + td64).freq == original_freq
+ assert (ts - td64).freq == original_freq
+
class TestTimestampProperties(object):
@@ -76,6 +116,112 @@ def test_properties_business(self):
assert control.is_month_end
assert control.is_quarter_end
+ def test_fields(self):
+ def check(value, equal):
+ # that we are int/long like
+ assert isinstance(value, (int, long))
+ assert value == equal
+
+ # GH 10050
+ ts = Timestamp('2015-05-10 09:06:03.000100001')
+ check(ts.year, 2015)
+ check(ts.month, 5)
+ check(ts.day, 10)
+ check(ts.hour, 9)
+ check(ts.minute, 6)
+ check(ts.second, 3)
+ pytest.raises(AttributeError, lambda: ts.millisecond)
+ check(ts.microsecond, 100)
+ check(ts.nanosecond, 1)
+ check(ts.dayofweek, 6)
+ check(ts.quarter, 2)
+ check(ts.dayofyear, 130)
+ check(ts.week, 19)
+ check(ts.daysinmonth, 31)
+ check(ts.daysinmonth, 31)
+
+ # GH 13303
+ ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
+ check(ts.year, 2014)
+ check(ts.month, 12)
+ check(ts.day, 31)
+ check(ts.hour, 23)
+ check(ts.minute, 59)
+ check(ts.second, 0)
+ pytest.raises(AttributeError, lambda: ts.millisecond)
+ check(ts.microsecond, 0)
+ check(ts.nanosecond, 0)
+ check(ts.dayofweek, 2)
+ check(ts.quarter, 4)
+ check(ts.dayofyear, 365)
+ check(ts.week, 1)
+ check(ts.daysinmonth, 31)
+
+ ts = Timestamp('2014-01-01 00:00:00+01:00')
+ starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
+ for start in starts:
+ assert getattr(ts, start)
+ ts = Timestamp('2014-12-31 23:59:59+01:00')
+ ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
+ for end in ends:
+ assert getattr(ts, end)
+
+ @pytest.mark.parametrize('data, expected',
+ [(Timestamp('2017-08-28 23:00:00'), 'Monday'),
+ (Timestamp('2017-08-28 23:00:00', tz='EST'),
+ 'Monday')])
+ def test_weekday_name(self, data, expected):
+ # GH 17354
+ assert data.weekday_name == expected
+
+ @pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
+ def test_is_leap_year(self, tz):
+ # GH 13727
+ dt = Timestamp('2000-01-01 00:00:00', tz=tz)
+ assert dt.is_leap_year
+ assert isinstance(dt.is_leap_year, bool)
+
+ dt = Timestamp('1999-01-01 00:00:00', tz=tz)
+ assert not dt.is_leap_year
+
+ dt = Timestamp('2004-01-01 00:00:00', tz=tz)
+ assert dt.is_leap_year
+
+ dt = Timestamp('2100-01-01 00:00:00', tz=tz)
+ assert not dt.is_leap_year
+
+ def test_woy_boundary(self):
+ # make sure weeks at year boundaries are correct
+ d = datetime(2013, 12, 31)
+ result = Timestamp(d).week
+ expected = 1 # ISO standard
+ assert result == expected
+
+ d = datetime(2008, 12, 28)
+ result = Timestamp(d).week
+ expected = 52 # ISO standard
+ assert result == expected
+
+ d = datetime(2009, 12, 31)
+ result = Timestamp(d).week
+ expected = 53 # ISO standard
+ assert result == expected
+
+ d = datetime(2010, 1, 1)
+ result = Timestamp(d).week
+ expected = 53 # ISO standard
+ assert result == expected
+
+ d = datetime(2010, 1, 3)
+ result = Timestamp(d).week
+ expected = 53 # ISO standard
+ assert result == expected
+
+ result = np.array([Timestamp(datetime(*args)).week
+ for args in [(2000, 1, 1), (2000, 1, 2), (
+ 2005, 1, 1), (2005, 1, 2)]])
+ assert (result == [52, 52, 53, 53]).all()
+
class TestTimestampConstructors(object):
@@ -310,24 +456,60 @@ def test_constructor_fromordinal(self):
ts = Timestamp.fromordinal(dt_tz.toordinal(), tz='US/Eastern')
assert ts.to_pydatetime() == dt_tz
+ def test_out_of_bounds_value(self):
+ one_us = np.timedelta64(1).astype('timedelta64[us]')
-class TestTimestamp(object):
+ # By definition we can't go out of bounds in [ns], so we
+ # convert the datetime64s to [us] so we can go out of bounds
+ min_ts_us = np.datetime64(Timestamp.min).astype('M8[us]')
+ max_ts_us = np.datetime64(Timestamp.max).astype('M8[us]')
- def test_conversion(self):
- # GH 9255
- ts = Timestamp('2000-01-01')
+ # No error for the min/max datetimes
+ Timestamp(min_ts_us)
+ Timestamp(max_ts_us)
- result = ts.to_pydatetime()
- expected = datetime(2000, 1, 1)
- assert result == expected
- assert type(result) == type(expected)
+ # One us less than the minimum is an error
+ with pytest.raises(ValueError):
+ Timestamp(min_ts_us - one_us)
- result = ts.to_datetime64()
- expected = np.datetime64(ts.value, 'ns')
- assert result == expected
- assert type(result) == type(expected)
- assert result.dtype == expected.dtype
+ # One us more than the maximum is an error
+ with pytest.raises(ValueError):
+ Timestamp(max_ts_us + one_us)
+
+ def test_out_of_bounds_string(self):
+ with pytest.raises(ValueError):
+ Timestamp('1676-01-01')
+ with pytest.raises(ValueError):
+ Timestamp('2263-01-01')
+ def test_bounds_with_different_units(self):
+ out_of_bounds_dates = ('1677-09-21', '2262-04-12')
+
+ time_units = ('D', 'h', 'm', 's', 'ms', 'us')
+
+ for date_string in out_of_bounds_dates:
+ for unit in time_units:
+ dt64 = np.datetime64(date_string, dtype='M8[%s]' % unit)
+ with pytest.raises(ValueError):
+ Timestamp(dt64)
+
+ in_bounds_dates = ('1677-09-23', '2262-04-11')
+
+ for date_string in in_bounds_dates:
+ for unit in time_units:
+ dt64 = np.datetime64(date_string, dtype='M8[%s]' % unit)
+ Timestamp(dt64)
+
+ def test_min_valid(self):
+ # Ensure that Timestamp.min is a valid Timestamp
+ Timestamp(Timestamp.min)
+
+ def test_max_valid(self):
+ # Ensure that Timestamp.max is a valid Timestamp
+ Timestamp(Timestamp.max)
+
+
+class TestTimestamp(object):
@pytest.mark.parametrize('freq', ['D', 'M', 'S', 'N'])
@pytest.mark.parametrize('date', ['2014-03-07', '2014-01-01 09:00',
'2014-01-01 00:00:00.000000001'])
@@ -394,22 +576,6 @@ def test_timestamp_repr_pre1900(self):
result = repr(stamp)
assert iso8601 in result
- def test_bounds_with_different_units(self):
- out_of_bounds_dates = ('1677-09-21', '2262-04-12', )
-
- time_units = ('D', 'h', 'm', 's', 'ms', 'us')
-
- for date_string in out_of_bounds_dates:
- for unit in time_units:
- pytest.raises(ValueError, Timestamp, np.datetime64(
- date_string, dtype='M8[%s]' % unit))
-
- in_bounds_dates = ('1677-09-23', '2262-04-11', )
-
- for date_string in in_bounds_dates:
- for unit in time_units:
- Timestamp(np.datetime64(date_string, dtype='M8[%s]' % unit))
-
def test_tz(self):
t = '2014-02-01 09:00'
ts = Timestamp(t)
@@ -435,11 +601,9 @@ def test_tz_localize_ambiguous(self):
ts_dst = ts.tz_localize('US/Eastern', ambiguous=True)
ts_no_dst = ts.tz_localize('US/Eastern', ambiguous=False)
- rng = date_range('2014-11-02', periods=3, freq='H', tz='US/Eastern')
- assert rng[1] == ts_dst
- assert rng[2] == ts_no_dst
- pytest.raises(ValueError, ts.tz_localize, 'US/Eastern',
- ambiguous='infer')
+ assert (ts_no_dst.value - ts_dst.value) / 1e9 == 3600
+ with pytest.raises(ValueError):
+ ts.tz_localize('US/Eastern', ambiguous='infer')
# GH 8025
with tm.assert_raises_regex(TypeError,
@@ -501,24 +665,6 @@ def test_tz_convert_roundtrip(self, tz):
assert reset.tzinfo is None
assert reset == converted.tz_convert('UTC').tz_localize(None)
- def test_barely_oob_dts(self):
- one_us = np.timedelta64(1).astype('timedelta64[us]')
-
- # By definition we can't go out of bounds in [ns], so we
- # convert the datetime64s to [us] so we can go out of bounds
- min_ts_us = np.datetime64(Timestamp.min).astype('M8[us]')
- max_ts_us = np.datetime64(Timestamp.max).astype('M8[us]')
-
- # No error for the min/max datetimes
- Timestamp(min_ts_us)
- Timestamp(max_ts_us)
-
- # One us less than the minimum is an error
- pytest.raises(ValueError, Timestamp, min_ts_us - one_us)
-
- # One us more than the maximum is an error
- pytest.raises(ValueError, Timestamp, max_ts_us + one_us)
-
def test_utc_z_designator(self):
assert get_timezone(Timestamp('2014-11-02 01:00Z').tzinfo) == 'UTC'
@@ -569,64 +715,6 @@ def test_asm8(self):
assert (Timestamp('nat').asm8.view('i8') ==
np.datetime64('nat', 'ns').view('i8'))
- def test_fields(self):
- def check(value, equal):
- # that we are int/long like
- assert isinstance(value, (int, compat.long))
- assert value == equal
-
- # GH 10050
- ts = Timestamp('2015-05-10 09:06:03.000100001')
- check(ts.year, 2015)
- check(ts.month, 5)
- check(ts.day, 10)
- check(ts.hour, 9)
- check(ts.minute, 6)
- check(ts.second, 3)
- pytest.raises(AttributeError, lambda: ts.millisecond)
- check(ts.microsecond, 100)
- check(ts.nanosecond, 1)
- check(ts.dayofweek, 6)
- check(ts.quarter, 2)
- check(ts.dayofyear, 130)
- check(ts.week, 19)
- check(ts.daysinmonth, 31)
- check(ts.daysinmonth, 31)
-
- # GH 13303
- ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
- check(ts.year, 2014)
- check(ts.month, 12)
- check(ts.day, 31)
- check(ts.hour, 23)
- check(ts.minute, 59)
- check(ts.second, 0)
- pytest.raises(AttributeError, lambda: ts.millisecond)
- check(ts.microsecond, 0)
- check(ts.nanosecond, 0)
- check(ts.dayofweek, 2)
- check(ts.quarter, 4)
- check(ts.dayofyear, 365)
- check(ts.week, 1)
- check(ts.daysinmonth, 31)
-
- ts = Timestamp('2014-01-01 00:00:00+01:00')
- starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
- for start in starts:
- assert getattr(ts, start)
- ts = Timestamp('2014-12-31 23:59:59+01:00')
- ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
- for end in ends:
- assert getattr(ts, end)
-
- @pytest.mark.parametrize('data, expected',
- [(Timestamp('2017-08-28 23:00:00'), 'Monday'),
- (Timestamp('2017-08-28 23:00:00', tz='EST'),
- 'Monday')])
- def test_weekday_name(self, data, expected):
- # GH 17354
- assert data.weekday_name == expected
-
def test_pprint(self):
# GH12622
import pprint
@@ -646,16 +734,6 @@ def test_pprint(self):
'foo': 1}"""
assert result == expected
- def test_to_pydatetime_nonzero_nano(self):
- ts = Timestamp('2011-01-01 9:00:00.123456789')
-
- # Warn the user of data loss (nanoseconds).
- with tm.assert_produces_warning(UserWarning,
- check_stacklevel=False):
- expected = datetime(2011, 1, 1, 9, 0, 0, 123456)
- result = ts.to_pydatetime()
- assert result == expected
-
def test_round(self):
# round
@@ -684,11 +762,6 @@ def test_round(self):
expected = Timestamp('20130104 12:30:00')
assert result == expected
- dti = date_range('20130101 09:10:11', periods=5)
- result = dti.round('D')
- expected = date_range('20130101', periods=5)
- tm.assert_index_equal(result, expected)
-
# floor
dt = Timestamp('20130101 09:10:11')
result = dt.floor('D')
@@ -711,19 +784,6 @@ def test_round(self):
result = dt.round('s')
assert result == dt
- dti = date_range('20130101 09:10:11',
- periods=5).tz_localize('UTC').tz_convert('US/Eastern')
- result = dti.round('D')
- expected = date_range('20130101', periods=5).tz_localize('US/Eastern')
- tm.assert_index_equal(result, expected)
-
- result = dti.round('s')
- tm.assert_index_equal(result, dti)
-
- # invalid
- for freq in ['Y', 'M', 'foobar']:
- pytest.raises(ValueError, lambda: dti.round(freq))
-
# GH 14440 & 15578
result = Timestamp('2016-10-17 12:00:00.0015').round('ms')
expected = Timestamp('2016-10-17 12:00:00.002000')
@@ -845,7 +905,7 @@ def check(val, unit=None, h=1, s=1, us=0):
check(days, unit='D', h=0)
# using truediv, so these are like floats
- if compat.PY3:
+ if PY3:
check((val + 500000) / long(1000000000), unit='s', us=500)
check((val + 500000000) / long(1000000000), unit='s', us=500000)
check((val + 500000) / long(1000000), unit='ms', us=500)
@@ -900,22 +960,6 @@ def test_hash_equivalent(self):
stamp = Timestamp(datetime(2011, 1, 1))
assert d[stamp] == 5
- @pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
- def test_is_leap_year(self, tz):
- # GH 13727
- dt = Timestamp('2000-01-01 00:00:00', tz=tz)
- assert dt.is_leap_year
- assert isinstance(dt.is_leap_year, bool)
-
- dt = Timestamp('1999-01-01 00:00:00', tz=tz)
- assert not dt.is_leap_year
-
- dt = Timestamp('2004-01-01 00:00:00', tz=tz)
- assert dt.is_leap_year
-
- dt = Timestamp('2100-01-01 00:00:00', tz=tz)
- assert not dt.is_leap_year
-
@td.skip_if_windows
def test_timestamp(self):
# GH#17329
@@ -1017,13 +1061,6 @@ def test_compare_invalid(self):
assert val != np.float64(1)
assert val != np.int64(1)
- # ops testing
- df = DataFrame(np.random.randn(5, 2))
- a = df[0]
- b = Series(np.random.randn(5))
- b.name = Timestamp('2000-01-01')
- tm.assert_series_equal(a / b, 1 / (b / a))
-
def test_cant_compare_tz_naive_w_aware(self):
# see gh-1404
a = Timestamp('3/12/2012')
@@ -1112,41 +1149,6 @@ def test_timestamp_compare_scalars(self):
result = right_f(nat, rhs)
assert result == expected
- def test_timestamp_compare_series(self):
- # make sure we can compare Timestamps on the right AND left hand side
- # GH4982
- s = Series(date_range('20010101', periods=10), name='dates')
- s_nat = s.copy(deep=True)
-
- s[0] = Timestamp('nat')
- s[3] = Timestamp('nat')
-
- ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'}
-
- for left, right in ops.items():
- left_f = getattr(operator, left)
- right_f = getattr(operator, right)
-
- # no nats
- expected = left_f(s, Timestamp('20010109'))
- result = right_f(Timestamp('20010109'), s)
- tm.assert_series_equal(result, expected)
-
- # nats
- expected = left_f(s, Timestamp('nat'))
- result = right_f(Timestamp('nat'), s)
- tm.assert_series_equal(result, expected)
-
- # compare to timestamp with series containing nats
- expected = left_f(s_nat, Timestamp('20010109'))
- result = right_f(Timestamp('20010109'), s_nat)
- tm.assert_series_equal(result, expected)
-
- # compare to nat with series containing nats
- expected = left_f(s_nat, Timestamp('nat'))
- result = right_f(Timestamp('nat'), s_nat)
- tm.assert_series_equal(result, expected)
-
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
@@ -1250,79 +1252,6 @@ def test_nanosecond_timestamp(self):
assert t.nanosecond == 10
-class TestTimestampOps(object):
-
- def test_timestamp_and_datetime(self):
- assert ((Timestamp(datetime(2013, 10, 13)) -
- datetime(2013, 10, 12)).days == 1)
- assert ((datetime(2013, 10, 12) -
- Timestamp(datetime(2013, 10, 13))).days == -1)
-
- def test_timestamp_and_series(self):
- timestamp_series = Series(date_range('2014-03-17', periods=2, freq='D',
- tz='US/Eastern'))
- first_timestamp = timestamp_series[0]
-
- delta_series = Series([np.timedelta64(0, 'D'), np.timedelta64(1, 'D')])
- assert_series_equal(timestamp_series - first_timestamp, delta_series)
- assert_series_equal(first_timestamp - timestamp_series, -delta_series)
-
- def test_addition_subtraction_types(self):
- # Assert on the types resulting from Timestamp +/- various date/time
- # objects
- datetime_instance = datetime(2014, 3, 4)
- timedelta_instance = timedelta(seconds=1)
- # build a timestamp with a frequency, since then it supports
- # addition/subtraction of integers
- timestamp_instance = Timestamp(datetime_instance, freq='D')
-
- assert type(timestamp_instance + 1) == Timestamp
- assert type(timestamp_instance - 1) == Timestamp
-
- # Timestamp + datetime not supported, though subtraction is supported
- # and yields timedelta more tests in tseries/base/tests/test_base.py
- assert type(timestamp_instance - datetime_instance) == Timedelta
- assert type(timestamp_instance + timedelta_instance) == Timestamp
- assert type(timestamp_instance - timedelta_instance) == Timestamp
-
- # Timestamp +/- datetime64 not supported, so not tested (could possibly
- # assert error raised?)
- timedelta64_instance = np.timedelta64(1, 'D')
- assert type(timestamp_instance + timedelta64_instance) == Timestamp
- assert type(timestamp_instance - timedelta64_instance) == Timestamp
-
- def test_addition_subtraction_preserve_frequency(self):
- timestamp_instance = Timestamp('2014-03-05', freq='D')
- timedelta_instance = timedelta(days=1)
- original_freq = timestamp_instance.freq
-
- assert (timestamp_instance + 1).freq == original_freq
- assert (timestamp_instance - 1).freq == original_freq
- assert (timestamp_instance + timedelta_instance).freq == original_freq
- assert (timestamp_instance - timedelta_instance).freq == original_freq
-
- timedelta64_instance = np.timedelta64(1, 'D')
- assert (timestamp_instance +
- timedelta64_instance).freq == original_freq
- assert (timestamp_instance -
- timedelta64_instance).freq == original_freq
-
- @pytest.mark.parametrize('tz', [None, 'Asia/Tokyo', 'US/Eastern',
- 'dateutil/US/Eastern'])
- def test_resolution(self, tz):
-
- for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
- 'S', 'L', 'U'],
- [RESO_DAY, RESO_DAY,
- RESO_DAY, RESO_DAY,
- RESO_HR, RESO_MIN,
- RESO_SEC, RESO_MS,
- RESO_US]):
- idx = date_range(start='2013-04-01', periods=30, freq=freq, tz=tz)
- result = period.resolution(idx.asi8, idx.tz)
- assert result == expected
-
-
class TestTimestampToJulianDate(object):
def test_compare_1700(self):
@@ -1347,6 +1276,31 @@ def test_compare_hour13(self):
class TestTimestampConversion(object):
+ def test_conversion(self):
+ # GH#9255
+ ts = Timestamp('2000-01-01')
+
+ result = ts.to_pydatetime()
+ expected = datetime(2000, 1, 1)
+ assert result == expected
+ assert type(result) == type(expected)
+
+ result = ts.to_datetime64()
+ expected = np.datetime64(ts.value, 'ns')
+ assert result == expected
+ assert type(result) == type(expected)
+ assert result.dtype == expected.dtype
+
+ def test_to_pydatetime_nonzero_nano(self):
+ ts = Timestamp('2011-01-01 9:00:00.123456789')
+
+ # Warn the user of data loss (nanoseconds).
+ with tm.assert_produces_warning(UserWarning,
+ check_stacklevel=False):
+ expected = datetime(2011, 1, 1, 9, 0, 0, 123456)
+ result = ts.to_pydatetime()
+ assert result == expected
+
def test_timestamp_to_datetime(self):
stamp = Timestamp('20090415', tz='US/Eastern', freq='D')
dtval = stamp.to_pydatetime()
@@ -1384,102 +1338,3 @@ def test_to_datetime_bijective(self):
with tm.assert_produces_warning(exp_warning, check_stacklevel=False):
assert (Timestamp(Timestamp.min.to_pydatetime()).value / 1000 ==
Timestamp.min.value / 1000)
-
-
-class TestTimeSeries(object):
-
- def test_timestamp_date_out_of_range(self):
- pytest.raises(ValueError, Timestamp, '1676-01-01')
- pytest.raises(ValueError, Timestamp, '2263-01-01')
-
- def test_timestamp_equality(self):
-
- # GH 11034
- s = Series([Timestamp('2000-01-29 01:59:00'), 'NaT'])
- result = s != s
- assert_series_equal(result, Series([False, True]))
- result = s != s[0]
- assert_series_equal(result, Series([False, True]))
- result = s != s[1]
- assert_series_equal(result, Series([True, True]))
-
- result = s == s
- assert_series_equal(result, Series([True, False]))
- result = s == s[0]
- assert_series_equal(result, Series([True, False]))
- result = s == s[1]
- assert_series_equal(result, Series([False, False]))
-
- def test_series_box_timestamp(self):
- rng = date_range('20090415', '20090519', freq='B')
- s = Series(rng)
-
- assert isinstance(s[5], Timestamp)
-
- rng = date_range('20090415', '20090519', freq='B')
- s = Series(rng, index=rng)
- assert isinstance(s[5], Timestamp)
-
- assert isinstance(s.iat[5], Timestamp)
-
- def test_to_html_timestamp(self):
- rng = date_range('2000-01-01', periods=10)
- df = DataFrame(np.random.randn(10, 4), index=rng)
-
- result = df.to_html()
- assert '2000-01-01' in result
-
- def test_series_map_box_timestamps(self):
- # #2689, #2627
- s = Series(date_range('1/1/2000', periods=10))
-
- def f(x):
- return (x.hour, x.day, x.month)
-
- # it works!
- s.map(f)
- s.apply(f)
- DataFrame(s).applymap(f)
-
- def test_woy_boundary(self):
- # make sure weeks at year boundaries are correct
- d = datetime(2013, 12, 31)
- result = Timestamp(d).week
- expected = 1 # ISO standard
- assert result == expected
-
- d = datetime(2008, 12, 28)
- result = Timestamp(d).week
- expected = 52 # ISO standard
- assert result == expected
-
- d = datetime(2009, 12, 31)
- result = Timestamp(d).week
- expected = 53 # ISO standard
- assert result == expected
-
- d = datetime(2010, 1, 1)
- result = Timestamp(d).week
- expected = 53 # ISO standard
- assert result == expected
-
- d = datetime(2010, 1, 3)
- result = Timestamp(d).week
- expected = 53 # ISO standard
- assert result == expected
-
- result = np.array([Timestamp(datetime(*args)).week
- for args in [(2000, 1, 1), (2000, 1, 2), (
- 2005, 1, 1), (2005, 1, 2)]])
- assert (result == [52, 52, 53, 53]).all()
-
-
-class TestTsUtil(object):
-
- def test_min_valid(self):
- # Ensure that Timestamp.min is a valid Timestamp
- Timestamp(Timestamp.min)
-
- def test_max_valid(self):
- # Ensure that Timestamp.max is a valid Timestamp
- Timestamp(Timestamp.max)
diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py
index 8899ab585d6cb..3822ecd0a1b0e 100644
--- a/pandas/tests/series/test_apply.py
+++ b/pandas/tests/series/test_apply.py
@@ -77,6 +77,17 @@ def test_apply_args(self):
assert result[0] == ['foo', 'bar']
assert isinstance(result[0], list)
+ def test_series_map_box_timestamps(self):
+ # GH#2689, GH#2627
+ ser = Series(pd.date_range('1/1/2000', periods=10))
+
+ def func(x):
+ return (x.hour, x.day, x.month)
+
+ # it works!
+ ser.map(func)
+ ser.apply(func)
+
def test_apply_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index 9db05ff590fed..ca558dd6b7cd5 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -1,10 +1,77 @@
# -*- coding: utf-8 -*-
from datetime import timedelta
+import operator
+
+import numpy as np
import pandas as pd
import pandas.util.testing as tm
+class TestSeriesComparison(object):
+ def test_compare_invalid(self):
+ # GH#8058
+ # ops testing
+ a = pd.Series(np.random.randn(5), name=0)
+ b = pd.Series(np.random.randn(5))
+ b.name = pd.Timestamp('2000-01-01')
+ tm.assert_series_equal(a / b, 1 / (b / a))
+
+
+class TestTimestampSeriesComparison(object):
+ def test_timestamp_compare_series(self):
+ # make sure we can compare Timestamps on the right AND left hand side
+ # GH#4982
+ ser = pd.Series(pd.date_range('20010101', periods=10), name='dates')
+ s_nat = ser.copy(deep=True)
+
+ ser[0] = pd.Timestamp('nat')
+ ser[3] = pd.Timestamp('nat')
+
+ ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'}
+
+ for left, right in ops.items():
+ left_f = getattr(operator, left)
+ right_f = getattr(operator, right)
+
+ # no nats
+ expected = left_f(ser, pd.Timestamp('20010109'))
+ result = right_f(pd.Timestamp('20010109'), ser)
+ tm.assert_series_equal(result, expected)
+
+ # nats
+ expected = left_f(ser, pd.Timestamp('nat'))
+ result = right_f(pd.Timestamp('nat'), ser)
+ tm.assert_series_equal(result, expected)
+
+ # compare to timestamp with series containing nats
+ expected = left_f(s_nat, pd.Timestamp('20010109'))
+ result = right_f(pd.Timestamp('20010109'), s_nat)
+ tm.assert_series_equal(result, expected)
+
+ # compare to nat with series containing nats
+ expected = left_f(s_nat, pd.Timestamp('nat'))
+ result = right_f(pd.Timestamp('nat'), s_nat)
+ tm.assert_series_equal(result, expected)
+
+ def test_timestamp_equality(self):
+ # GH#11034
+ ser = pd.Series([pd.Timestamp('2000-01-29 01:59:00'), 'NaT'])
+ result = ser != ser
+ tm.assert_series_equal(result, pd.Series([False, True]))
+ result = ser != ser[0]
+ tm.assert_series_equal(result, pd.Series([False, True]))
+ result = ser != ser[1]
+ tm.assert_series_equal(result, pd.Series([True, True]))
+
+ result = ser == ser
+ tm.assert_series_equal(result, pd.Series([True, False]))
+ result = ser == ser[0]
+ tm.assert_series_equal(result, pd.Series([True, False]))
+ result = ser == ser[1]
+ tm.assert_series_equal(result, pd.Series([False, False]))
+
+
class TestTimedeltaSeriesComparisons(object):
def test_compare_timedelta_series(self):
# regresssion test for GH5963
@@ -55,3 +122,15 @@ def test_ops_series_period(self):
expected = pd.Series([4, 2], name='xxx', dtype=object)
tm.assert_series_equal(s2 - ser, expected)
tm.assert_series_equal(ser - s2, -expected)
+
+
+class TestTimestampSeriesArithmetic(object):
+ def test_timestamp_sub_series(self):
+ ser = pd.Series(pd.date_range('2014-03-17', periods=2, freq='D',
+ tz='US/Eastern'))
+ ts = ser[0]
+
+ delta_series = pd.Series([np.timedelta64(0, 'D'),
+ np.timedelta64(1, 'D')])
+ tm.assert_series_equal(ser - ts, delta_series)
+ tm.assert_series_equal(ts - ser, -delta_series)
diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py
index bafc6d268c266..fbfbad547ce1b 100644
--- a/pandas/tests/series/test_indexing.py
+++ b/pandas/tests/series/test_indexing.py
@@ -610,6 +610,18 @@ def test_getitem_box_float64(self):
value = self.ts[5]
assert isinstance(value, np.float64)
+ def test_series_box_timestamp(self):
+ rng = pd.date_range('20090415', '20090519', freq='B')
+ ser = Series(rng)
+
+ assert isinstance(ser[5], pd.Timestamp)
+
+ rng = pd.date_range('20090415', '20090519', freq='B')
+ ser = Series(rng, index=rng)
+ assert isinstance(ser[5], pd.Timestamp)
+
+ assert isinstance(ser.iat[5], pd.Timestamp)
+
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
pytest.raises(KeyError, s.__getitem__, 1)
| Getting DataFrame, Series, Index tests out of tests.scalars and organizing them better, should make it easier to go and parametrize some older tests, in particular arithmetic and comparisons. | https://api.github.com/repos/pandas-dev/pandas/pulls/19385 | 2018-01-24T23:18:41Z | 2018-01-25T12:01:56Z | 2018-01-25T12:01:56Z | 2018-01-31T06:49:34Z |
Remove unused from _libs.parsers | diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 932ae8b1a33d0..efe61716d0831 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -7,9 +7,8 @@ import warnings
from csv import QUOTE_MINIMAL, QUOTE_NONNUMERIC, QUOTE_NONE
-from libc.stdio cimport fopen, fclose
-from libc.stdlib cimport malloc, free
-from libc.string cimport strncpy, strlen, strcmp, strcasecmp
+from libc.stdlib cimport free
+from libc.string cimport strncpy, strlen, strcasecmp
cimport cython
from cython cimport Py_ssize_t
@@ -27,9 +26,6 @@ cdef extern from "Python.h":
object PyUnicode_Decode(char *v, Py_ssize_t size, char *encoding,
char *errors)
-cdef extern from "stdlib.h":
- void memcpy(void *dst, void *src, size_t n)
-
import numpy as np
cimport numpy as cnp
@@ -50,7 +46,7 @@ from khash cimport (
import pandas.compat as compat
from pandas.core.dtypes.common import (
- is_categorical_dtype, CategoricalDtype,
+ is_categorical_dtype,
is_integer_dtype, is_float_dtype,
is_bool_dtype, is_object_dtype,
is_datetime64_dtype,
@@ -90,9 +86,6 @@ try:
except NameError:
basestring = str
-cdef extern from "src/numpy_helper.h":
- void transfer_object_column(char *dst, char *src, size_t stride,
- size_t length)
cdef extern from "parser/tokenizer.h":
@@ -232,8 +225,6 @@ cdef extern from "parser/tokenizer.h":
int parser_trim_buffers(parser_t *self)
- void debug_print_parser(parser_t *self)
-
int tokenize_all_rows(parser_t *self) nogil
int tokenize_nrows(parser_t *self, size_t nrows) nogil
@@ -249,7 +240,6 @@ cdef extern from "parser/tokenizer.h":
double round_trip(const char *p, char **q, char decimal, char sci,
char tsep, int skip_trailing) nogil
- int to_longlong(char *item, long long *p_value) nogil
int to_boolean(const char *item, uint8_t *val) nogil
@@ -875,9 +865,6 @@ cdef class TextReader:
return header, field_count
- cdef _implicit_index_count(self):
- pass
-
def read(self, rows=None):
"""
rows=None --> read all rows
@@ -997,9 +984,6 @@ cdef class TextReader:
return columns
- def debug_print(self):
- debug_print_parser(self.parser)
-
cdef _start_clock(self):
self.clocks.append(time.time())
@@ -1346,6 +1330,7 @@ cdef class TextReader:
else:
return None
+
cdef object _true_values = [b'True', b'TRUE', b'true']
cdef object _false_values = [b'False', b'FALSE', b'false']
@@ -1375,21 +1360,6 @@ cdef asbytes(object o):
_NA_VALUES = _ensure_encoded(list(com._NA_VALUES))
-def _is_file_like(obj):
- if PY3:
- import io
- if isinstance(obj, io.TextIOWrapper):
- raise ParserError('Cannot handle open unicode files (yet)')
-
- # BufferedReader is a byte reader for Python 3
- file = io.BufferedReader
- else:
- import __builtin__
- file = __builtin__.file
-
- return isinstance(obj, (basestring, file))
-
-
def _maybe_upcast(arr):
"""
@@ -1479,6 +1449,7 @@ cdef _string_box_factorize(parser_t *parser, int64_t col,
return result, na_count
+
cdef _string_box_utf8(parser_t *parser, int64_t col,
int64_t line_start, int64_t line_end,
bint na_filter, kh_str_t *na_hashset):
@@ -1532,6 +1503,7 @@ cdef _string_box_utf8(parser_t *parser, int64_t col,
return result, na_count
+
cdef _string_box_decode(parser_t *parser, int64_t col,
int64_t line_start, int64_t line_end,
bint na_filter, kh_str_t *na_hashset,
@@ -1662,6 +1634,7 @@ cdef _categorical_convert(parser_t *parser, int64_t col,
kh_destroy_str(table)
return np.asarray(codes), result, na_count
+
cdef _to_fw_string(parser_t *parser, int64_t col, int64_t line_start,
int64_t line_end, int64_t width):
cdef:
@@ -1679,6 +1652,7 @@ cdef _to_fw_string(parser_t *parser, int64_t col, int64_t line_start,
return result
+
cdef inline void _to_fw_string_nogil(parser_t *parser, int64_t col,
int64_t line_start, int64_t line_end,
size_t width, char *data) nogil:
@@ -1694,10 +1668,12 @@ cdef inline void _to_fw_string_nogil(parser_t *parser, int64_t col,
strncpy(data, word, width)
data += width
+
cdef char* cinf = b'inf'
cdef char* cposinf = b'+inf'
cdef char* cneginf = b'-inf'
+
cdef _try_double(parser_t *parser, int64_t col,
int64_t line_start, int64_t line_end,
bint na_filter, kh_str_t *na_hashset, object na_flist):
@@ -1738,6 +1714,7 @@ cdef _try_double(parser_t *parser, int64_t col,
return None, None
return result, na_count
+
cdef inline int _try_double_nogil(parser_t *parser,
double (*double_converter)(
const char *, char **, char,
@@ -1808,6 +1785,7 @@ cdef inline int _try_double_nogil(parser_t *parser,
return 0
+
cdef _try_uint64(parser_t *parser, int64_t col,
int64_t line_start, int64_t line_end,
bint na_filter, kh_str_t *na_hashset):
@@ -1843,6 +1821,7 @@ cdef _try_uint64(parser_t *parser, int64_t col,
return result
+
cdef inline int _try_uint64_nogil(parser_t *parser, int64_t col,
int64_t line_start,
int64_t line_end, bint na_filter,
@@ -1881,6 +1860,7 @@ cdef inline int _try_uint64_nogil(parser_t *parser, int64_t col,
return 0
+
cdef _try_int64(parser_t *parser, int64_t col,
int64_t line_start, int64_t line_end,
bint na_filter, kh_str_t *na_hashset):
@@ -1909,6 +1889,7 @@ cdef _try_int64(parser_t *parser, int64_t col,
return result, na_count
+
cdef inline int _try_int64_nogil(parser_t *parser, int64_t col,
int64_t line_start,
int64_t line_end, bint na_filter,
@@ -1948,69 +1929,6 @@ cdef inline int _try_int64_nogil(parser_t *parser, int64_t col,
return 0
-cdef _try_bool(parser_t *parser, int64_t col,
- int64_t line_start, int64_t line_end,
- bint na_filter, kh_str_t *na_hashset):
- cdef:
- int na_count
- Py_ssize_t lines = line_end - line_start
- uint8_t *data
- cnp.ndarray[cnp.uint8_t, ndim=1] result
-
- uint8_t NA = na_values[np.bool_]
-
- result = np.empty(lines)
- data = <uint8_t *> result.data
-
- with nogil:
- error = _try_bool_nogil(parser, col, line_start,
- line_end, na_filter,
- na_hashset, NA, data,
- &na_count)
- if error != 0:
- return None, None
- return result.view(np.bool_), na_count
-
-cdef inline int _try_bool_nogil(parser_t *parser, int64_t col,
- int64_t line_start,
- int64_t line_end, bint na_filter,
- const kh_str_t *na_hashset, uint8_t NA,
- uint8_t *data, int *na_count) nogil:
- cdef:
- int error
- Py_ssize_t i, lines = line_end - line_start
- coliter_t it
- const char *word = NULL
- khiter_t k
- na_count[0] = 0
-
- coliter_setup(&it, parser, col, line_start)
-
- if na_filter:
- for i in range(lines):
- COLITER_NEXT(it, word)
-
- k = kh_get_str(na_hashset, word)
- # in the hash table
- if k != na_hashset.n_buckets:
- na_count[0] += 1
- data[0] = NA
- data += 1
- continue
-
- error = to_boolean(word, data)
- if error != 0:
- return error
- data += 1
- else:
- for i in range(lines):
- COLITER_NEXT(it, word)
-
- error = to_boolean(word, data)
- if error != 0:
- return error
- data += 1
- return 0
cdef _try_bool_flex(parser_t *parser, int64_t col,
int64_t line_start, int64_t line_end,
@@ -2039,6 +1957,7 @@ cdef _try_bool_flex(parser_t *parser, int64_t col,
return None, None
return result.view(np.bool_), na_count
+
cdef inline int _try_bool_flex_nogil(parser_t *parser, int64_t col,
int64_t line_start,
int64_t line_end, bint na_filter,
@@ -2131,6 +2050,7 @@ cdef kh_str_t* kset_from_list(list values) except NULL:
return table
+
cdef kh_float64_t* kset_float64_from_list(values) except NULL:
# caller takes responsibility for freeing the hash table
cdef:
diff --git a/pandas/_libs/src/numpy_helper.h b/pandas/_libs/src/numpy_helper.h
index de3486eca3e9b..6c2029fff8a1a 100644
--- a/pandas/_libs/src/numpy_helper.h
+++ b/pandas/_libs/src/numpy_helper.h
@@ -75,22 +75,6 @@ PANDAS_INLINE PyObject* char_to_string(char* data) {
#endif
}
-void transfer_object_column(char* dst, char* src, size_t stride,
- size_t length) {
- size_t i;
- size_t sz = sizeof(PyObject*);
-
- for (i = 0; i < length; ++i) {
- // uninitialized data
-
- // Py_XDECREF(*((PyObject**) dst));
-
- memcpy(dst, src, sz);
- Py_INCREF(*((PyObject**)dst));
- src += sz;
- dst += stride;
- }
-}
void set_array_not_contiguous(PyArrayObject* ao) {
ao->flags &= ~(NPY_C_CONTIGUOUS | NPY_F_CONTIGUOUS);
diff --git a/pandas/_libs/src/parser/.gitignore b/pandas/_libs/src/parser/.gitignore
deleted file mode 100644
index f07e771a35eec..0000000000000
--- a/pandas/_libs/src/parser/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-!*.c
-test*
\ No newline at end of file
diff --git a/pandas/_libs/src/parser/Makefile b/pandas/_libs/src/parser/Makefile
deleted file mode 100644
index ec88eaf44ba15..0000000000000
--- a/pandas/_libs/src/parser/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-PYTHONBASE = /Library/Frameworks/EPD64.framework/Versions/Current
-NUMPY_INC = /Library/Frameworks/EPD64.framework/Versions/7.1/lib/python2.7/site-packages/numpy/core/include
-PYTHON_INC = -I$(PYTHONBASE)/include/python2.7 -I$(NUMPY_INC)
-PYTHON_LINK = -L$(PYTHONBASE)/lib -lpython
-
-SOURCES = conversions.c parser.c str_to.c
-
-check-syntax:
- gcc -g $(PYTHON_INC) -o /dev/null -S ${CHK_SOURCES}
-
-test: $(SOURCES)
- gcc $(PYTHON_INC) -o test $(SOURCES)
- ./test
\ No newline at end of file
diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c
index 2e4ade209fa38..6e8c220eab6b8 100644
--- a/pandas/_libs/src/parser/tokenizer.c
+++ b/pandas/_libs/src/parser/tokenizer.c
@@ -1317,21 +1317,6 @@ int parser_trim_buffers(parser_t *self) {
return 0;
}
-void debug_print_parser(parser_t *self) {
- int64_t j, line;
- char *token;
-
- for (line = 0; line < self->lines; ++line) {
- printf("(Parsed) Line %lld: ", (long long)line);
-
- for (j = 0; j < self->line_fields[j]; ++j) {
- token = self->words[j + self->line_start[line]];
- printf("%s ", token);
- }
- printf("\n");
- }
-}
-
/*
nrows : number of rows to tokenize (or until reach EOF)
all : tokenize all the data vs. certain number of rows
diff --git a/pandas/_libs/src/parser/tokenizer.h b/pandas/_libs/src/parser/tokenizer.h
index 9462608a26814..63baf91e3c136 100644
--- a/pandas/_libs/src/parser/tokenizer.h
+++ b/pandas/_libs/src/parser/tokenizer.h
@@ -247,8 +247,6 @@ void parser_del(parser_t *self);
void parser_set_default_options(parser_t *self);
-void debug_print_parser(parser_t *self);
-
int tokenize_nrows(parser_t *self, size_t nrows);
int tokenize_all_rows(parser_t *self);
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19380 | 2018-01-24T19:26:23Z | 2018-01-25T11:54:45Z | 2018-01-25T11:54:45Z | 2018-01-31T06:49:29Z |
BUG SparseDataFrame with dense Series (#19374) | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 4dde76dee46a5..246eab386b2ab 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -512,7 +512,7 @@ Groupby/Resample/Rolling
Sparse
^^^^^^
--
+- Bug in which creating a ``SparseDataFrame`` from a dense ``Series`` or an unsupported type raised an uncontrolled exception (:issue:`19374`)
-
-
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py
index c7f5b0ba67c19..eb3184f371a0c 100644
--- a/pandas/core/sparse/frame.py
+++ b/pandas/core/sparse/frame.py
@@ -95,6 +95,9 @@ def __init__(self, data=None, index=None, columns=None, default_kind=None,
dtype=dtype, copy=copy)
elif isinstance(data, DataFrame):
mgr = self._init_dict(data, data.index, data.columns, dtype=dtype)
+ elif isinstance(data, Series):
+ mgr = self._init_dict(data.to_frame(), data.index,
+ columns=None, dtype=dtype)
elif isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
@@ -116,6 +119,10 @@ def __init__(self, data=None, index=None, columns=None, default_kind=None,
mgr = to_manager(data, columns, index)
if dtype is not None:
mgr = mgr.astype(dtype)
+ else:
+ msg = ('SparseDataFrame called with unkown type "{data_type}" '
+ 'for data argument')
+ raise TypeError(msg.format(data_type=type(data).__name__))
generic.NDFrame.__init__(self, mgr)
diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py
index 058892e3b85ff..2b589ebd4735e 100644
--- a/pandas/tests/sparse/frame/test_frame.py
+++ b/pandas/tests/sparse/frame/test_frame.py
@@ -199,6 +199,29 @@ def test_constructor_from_series(self):
# without sparse value raises error
# df2 = SparseDataFrame([x2_sparse, y])
+ def test_constructor_from_dense_series(self):
+ # GH 19393
+ # series with name
+ x = Series(np.random.randn(10000), name='a')
+ result = SparseDataFrame(x)
+ expected = x.to_frame().to_sparse()
+ tm.assert_sp_frame_equal(result, expected)
+
+ # series with no name
+ x = Series(np.random.randn(10000))
+ result = SparseDataFrame(x)
+ expected = x.to_frame().to_sparse()
+ tm.assert_sp_frame_equal(result, expected)
+
+ def test_constructor_from_unknown_type(self):
+ # GH 19393
+ class Unknown:
+ pass
+ with pytest.raises(TypeError,
+ message='SparseDataFrame called with unkown type '
+ '"Unknown" for data argument'):
+ SparseDataFrame(Unknown())
+
def test_constructor_preserve_attr(self):
# GH 13866
arr = pd.SparseArray([1, 0, 3, 0], dtype=np.int64, fill_value=0)
| - [X] closes #19374
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19377 | 2018-01-24T18:04:21Z | 2018-01-27T01:10:16Z | 2018-01-27T01:10:16Z | 2018-01-27T01:10:47Z |
Add CategoricalDtype to deprecated core.categorical shim | diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 17435dfc48bde..530a3ecb5f378 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -5,3 +5,4 @@
FutureWarning, stacklevel=2)
from pandas.core.arrays import Categorical # noqa
+from pandas.core.dtypes.dtypes import CategoricalDtype # noqa
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 4a10ed6e7402c..c20767b09178c 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -259,3 +259,8 @@ def test_categorical_move(self):
with tm.assert_produces_warning(FutureWarning):
from pandas.core.categorical import Categorical # noqa
+
+ sys.modules.pop("pandas.core.categorical", None)
+
+ with tm.assert_produces_warning(FutureWarning):
+ from pandas.core.categorical import CategoricalDtype # noqa
| At least fastparquet was using it | https://api.github.com/repos/pandas-dev/pandas/pulls/19373 | 2018-01-24T16:08:24Z | 2018-01-25T01:07:53Z | 2018-01-25T01:07:53Z | 2018-02-13T21:58:38Z |
implement ABCInterval | diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index e1ffd450c9a68..0718f8bd2b970 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -109,6 +109,7 @@ cdef class Interval(IntervalMixin):
cut, qcut : Convert arrays of continuous data into Categoricals/Series of
Interval.
"""
+ _typ = "interval"
cdef readonly object left
"""Left bound for the interval"""
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 1632f5d016439..042b319d51abf 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -49,7 +49,6 @@ PyDateTime_IMPORT
from tslibs.np_datetime cimport get_timedelta64_value, get_datetime64_value
from tslib import NaT, Timestamp, Timedelta, array_to_datetime
-from interval import Interval
from missing cimport checknull
diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx
index e15f276b39bf8..46d3c2a9c04b2 100644
--- a/pandas/_libs/src/inference.pyx
+++ b/pandas/_libs/src/inference.pyx
@@ -38,7 +38,7 @@ cpdef bint is_decimal(object obj):
cpdef bint is_interval(object obj):
- return isinstance(obj, Interval)
+ return getattr(obj, '_typ', '_typ') == 'interval'
cpdef bint is_period(object val):
diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py
index 629d88aa7f086..6fae09c43d2be 100644
--- a/pandas/core/dtypes/generic.py
+++ b/pandas/core/dtypes/generic.py
@@ -54,6 +54,7 @@ def _check(cls, inst):
ABCPeriod = create_pandas_abc_type("ABCPeriod", "_typ", ("period", ))
ABCDateOffset = create_pandas_abc_type("ABCDateOffset", "_typ",
("dateoffset",))
+ABCInterval = create_pandas_abc_type("ABCInterval", "_typ", ("interval", ))
class _ABCGeneric(type):
diff --git a/pandas/tests/dtypes/test_generic.py b/pandas/tests/dtypes/test_generic.py
index bd365f9c3281f..58cb182e7d403 100644
--- a/pandas/tests/dtypes/test_generic.py
+++ b/pandas/tests/dtypes/test_generic.py
@@ -45,6 +45,8 @@ def test_abc_types(self):
gt.ABCDateOffset)
assert not isinstance(pd.Period('2012', freq='A-DEC'),
gt.ABCDateOffset)
+ assert isinstance(pd.Interval(0, 1.5), gt.ABCInterval)
+ assert not isinstance(pd.Period('2012', freq='A-DEC'), gt.ABCInterval)
def test_setattr_warnings():
| Avoid python-space import in lib
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19367 | 2018-01-24T03:48:45Z | 2018-01-25T01:10:11Z | 2018-01-25T01:10:11Z | 2018-02-11T21:58:30Z |
updated hist documentation | diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 8b03d6ddde4ec..88b899ad60313 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -2156,10 +2156,18 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
The size of the figure to create in inches by default
layout : tuple, optional
Tuple of (rows, columns) for the layout of the histograms
- bins : integer, default 10
- Number of histogram bins to be used
+ bins : integer or sequence, default 10
+ Number of histogram bins to be used. If an integer is given, bins + 1
+ bin edges are calculated and returned. If bins is a sequence, gives
+ bin edges, including left edge of first bin and right edge of last
+ bin. In this case, bins is returned unmodified.
`**kwds` : other plotting keyword arguments
To be passed to hist function
+
+ See Also
+ --------
+ matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.
+
"""
_converter._WARN = False
if by is not None:
@@ -2219,14 +2227,19 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,
rotation of y axis labels
figsize : tuple, default None
figure size in inches by default
+ bins : integer or sequence, default 10
+ Number of histogram bins to be used. If an integer is given, bins + 1
+ bin edges are calculated and returned. If bins is a sequence, gives
+ bin edges, including left edge of first bin and right edge of last
+ bin. In this case, bins is returned unmodified.
bins: integer, default 10
Number of histogram bins to be used
`**kwds` : keywords
To be passed to the actual plotting function
- Notes
- -----
- See matplotlib documentation online for more on this
+ See Also
+ --------
+ matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.
"""
import matplotlib.pyplot as plt
| pandas.DataFrame.hist()/pandas.Series.hist() currently say that the `bins` keyword argument must be an integer, with a default value of 10. Since these methods are built off matplotlib, these actually seem to be more flexible (allowing sequences as well as integers). I don't know if there was a reason for this discrepancy, but if not, I've adjusted the pandas docstrings to reflect the matplotlib documentation.
[Existing pandas documentation](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.hist.html)
[Existing matplotlib documentation](https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.hist.html)
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19366 | 2018-01-24T03:45:40Z | 2018-02-01T13:15:43Z | 2018-02-01T13:15:43Z | 2018-06-22T06:48:58Z |
remove unused convert_sql_column | diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx
index e15f276b39bf8..39656239aae76 100644
--- a/pandas/_libs/src/inference.pyx
+++ b/pandas/_libs/src/inference.pyx
@@ -1390,10 +1390,6 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0,
return objects
-def convert_sql_column(x):
- return maybe_convert_objects(x, try_float=1)
-
-
def sanitize_objects(ndarray[object] values, set na_values,
convert_empty=True):
cdef:
diff --git a/pandas/tests/dtypes/test_io.py b/pandas/tests/dtypes/test_io.py
deleted file mode 100644
index 06b61371c9a0b..0000000000000
--- a/pandas/tests/dtypes/test_io.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import numpy as np
-import pandas._libs.lib as lib
-import pandas.util.testing as tm
-
-from pandas.compat import long, u
-
-
-class TestParseSQL(object):
-
- def test_convert_sql_column_floats(self):
- arr = np.array([1.5, None, 3, 4.2], dtype=object)
- result = lib.convert_sql_column(arr)
- expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
- tm.assert_numpy_array_equal(result, expected)
-
- def test_convert_sql_column_strings(self):
- arr = np.array(['1.5', None, '3', '4.2'], dtype=object)
- result = lib.convert_sql_column(arr)
- expected = np.array(['1.5', np.nan, '3', '4.2'], dtype=object)
- tm.assert_numpy_array_equal(result, expected)
-
- def test_convert_sql_column_unicode(self):
- arr = np.array([u('1.5'), None, u('3'), u('4.2')],
- dtype=object)
- result = lib.convert_sql_column(arr)
- expected = np.array([u('1.5'), np.nan, u('3'), u('4.2')],
- dtype=object)
- tm.assert_numpy_array_equal(result, expected)
-
- def test_convert_sql_column_ints(self):
- arr = np.array([1, 2, 3, 4], dtype='O')
- arr2 = np.array([1, 2, 3, 4], dtype='i4').astype('O')
- result = lib.convert_sql_column(arr)
- result2 = lib.convert_sql_column(arr2)
- expected = np.array([1, 2, 3, 4], dtype='i8')
- tm.assert_numpy_array_equal(result, expected)
- tm.assert_numpy_array_equal(result2, expected)
-
- arr = np.array([1, 2, 3, None, 4], dtype='O')
- result = lib.convert_sql_column(arr)
- expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
- tm.assert_numpy_array_equal(result, expected)
-
- def test_convert_sql_column_longs(self):
- arr = np.array([long(1), long(2), long(3), long(4)], dtype='O')
- result = lib.convert_sql_column(arr)
- expected = np.array([1, 2, 3, 4], dtype='i8')
- tm.assert_numpy_array_equal(result, expected)
-
- arr = np.array([long(1), long(2), long(3), None, long(4)], dtype='O')
- result = lib.convert_sql_column(arr)
- expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
- tm.assert_numpy_array_equal(result, expected)
-
- def test_convert_sql_column_bools(self):
- arr = np.array([True, False, True, False], dtype='O')
- result = lib.convert_sql_column(arr)
- expected = np.array([True, False, True, False], dtype=bool)
- tm.assert_numpy_array_equal(result, expected)
-
- arr = np.array([True, False, None, False], dtype='O')
- result = lib.convert_sql_column(arr)
- expected = np.array([True, False, np.nan, False], dtype=object)
- tm.assert_numpy_array_equal(result, expected)
-
- def test_convert_sql_column_decimals(self):
- from decimal import Decimal
- arr = np.array([Decimal('1.5'), None, Decimal('3'), Decimal('4.2')])
- result = lib.convert_sql_column(arr)
- expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
- tm.assert_numpy_array_equal(result, expected)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19363 | 2018-01-23T16:41:44Z | 2018-01-24T11:10:06Z | 2018-01-24T11:10:06Z | 2018-02-11T21:59:32Z |
Remove unused functions, cimports | diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index f8371d4855803..15aef867ba413 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -73,10 +73,6 @@ cpdef object get_value_box(ndarray arr, object loc):
return util.get_value_1d(arr, i)
-def set_value_at(ndarray arr, object loc, object val):
- return util.set_value_at(arr, loc, val)
-
-
# Don't populate hash tables in monotonic indexes larger than this
_SIZE_CUTOFF = 1000000
@@ -404,18 +400,6 @@ cdef Py_ssize_t _bin_search(ndarray values, object val) except -1:
else:
return mid + 1
-_pad_functions = {
- 'object': algos.pad_object,
- 'int64': algos.pad_int64,
- 'float64': algos.pad_float64
-}
-
-_backfill_functions = {
- 'object': algos.backfill_object,
- 'int64': algos.backfill_int64,
- 'float64': algos.backfill_float64
-}
-
cdef class DatetimeEngine(Int64Engine):
@@ -566,7 +550,7 @@ cpdef convert_scalar(ndarray arr, object value):
# we don't turn bools into int/float/complex
if arr.descr.type_num == NPY_DATETIME:
- if isinstance(value, np.ndarray):
+ if util.is_array(value):
pass
elif isinstance(value, (datetime, np.datetime64, date)):
return Timestamp(value).value
@@ -577,7 +561,7 @@ cpdef convert_scalar(ndarray arr, object value):
raise ValueError("cannot set a Timestamp with a non-timestamp")
elif arr.descr.type_num == NPY_TIMEDELTA:
- if isinstance(value, np.ndarray):
+ if util.is_array(value):
pass
elif isinstance(value, timedelta):
return Timedelta(value).value
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index 93a45335efc9c..a5abe324254ce 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -4,6 +4,7 @@ cimport cython
from cython cimport Py_ssize_t
from cpython cimport PyObject
+from cpython.slice cimport PySlice_Check
cdef extern from "Python.h":
Py_ssize_t PY_SSIZE_T_MAX
@@ -32,7 +33,7 @@ cdef class BlockPlacement:
self._has_slice = False
self._has_array = False
- if isinstance(val, slice):
+ if PySlice_Check(val):
slc = slice_canonize(val)
if slc.start != slc.stop:
@@ -118,7 +119,7 @@ cdef class BlockPlacement:
else:
val = self._as_array[loc]
- if not isinstance(val, slice) and val.ndim == 0:
+ if not PySlice_Check(val) and val.ndim == 0:
return val
return BlockPlacement(val)
@@ -288,7 +289,7 @@ def slice_getitem(slice slc not None, ind):
s_start, s_stop, s_step, s_len = slice_get_indices_ex(slc)
- if isinstance(ind, slice):
+ if PySlice_Check(ind):
ind_start, ind_stop, ind_step, ind_len = slice_get_indices_ex(ind,
s_len)
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 1632f5d016439..e337c2b25b887 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -17,8 +17,6 @@ from numpy cimport (ndarray, PyArray_NDIM, PyArray_GETITEM,
np.import_array()
np.import_ufunc()
-from libc.stdlib cimport malloc, free
-
from cpython cimport (Py_INCREF, PyTuple_SET_ITEM,
PyList_Check, PyFloat_Check,
PyString_Check,
@@ -27,8 +25,7 @@ from cpython cimport (Py_INCREF, PyTuple_SET_ITEM,
PyTuple_New,
PyObject_RichCompareBool,
PyBytes_GET_SIZE,
- PyUnicode_GET_SIZE,
- PyObject)
+ PyUnicode_GET_SIZE)
try:
from cpython cimport PyString_GET_SIZE
@@ -37,17 +34,12 @@ except ImportError:
cimport cpython
-isnan = np.isnan
-cdef double NaN = <double> np.NaN
-cdef double nan = NaN
from cpython.datetime cimport (PyDateTime_Check, PyDate_Check,
PyTime_Check, PyDelta_Check,
PyDateTime_IMPORT)
PyDateTime_IMPORT
-from tslibs.np_datetime cimport get_timedelta64_value, get_datetime64_value
-
from tslib import NaT, Timestamp, Timedelta, array_to_datetime
from interval import Interval
from missing cimport checknull
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index d51583c7aa473..4ca87a777e497 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -24,9 +24,9 @@ is_numpy_prior_1_6_2 = LooseVersion(np.__version__) < '1.6.2'
cdef _get_result_array(object obj, Py_ssize_t size, Py_ssize_t cnt):
- if isinstance(obj, np.ndarray) \
- or isinstance(obj, list) and len(obj) == cnt \
- or getattr(obj, 'shape', None) == (cnt,):
+ if (util.is_array(obj) or
+ isinstance(obj, list) and len(obj) == cnt or
+ getattr(obj, 'shape', None) == (cnt,)):
raise ValueError('function does not reduce')
return np.empty(size, dtype='O')
@@ -150,8 +150,7 @@ cdef class Reducer:
else:
res = self.f(chunk)
- if hasattr(res, 'values') and isinstance(
- res.values, np.ndarray):
+ if hasattr(res, 'values') and util.is_array(res.values):
res = res.values
if i == 0:
result = _get_result_array(res,
@@ -433,10 +432,10 @@ cdef class SeriesGrouper:
cdef inline _extract_result(object res):
""" extract the result object, it might be a 0-dim ndarray
or a len-1 0-dim, or a scalar """
- if hasattr(res, 'values') and isinstance(res.values, np.ndarray):
+ if hasattr(res, 'values') and util.is_array(res.values):
res = res.values
if not np.isscalar(res):
- if isinstance(res, np.ndarray):
+ if util.is_array(res):
if res.ndim == 0:
res = res.item()
elif res.ndim == 1 and len(res) == 1:
diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx
index e15f276b39bf8..52ae32023e2b4 100644
--- a/pandas/_libs/src/inference.pyx
+++ b/pandas/_libs/src/inference.pyx
@@ -10,10 +10,9 @@ from datetime import datetime, timedelta
iNaT = util.get_nat()
cdef bint PY2 = sys.version_info[0] == 2
+cdef double nan = <double> np.NaN
-from util cimport (UINT8_MAX, UINT16_MAX, UINT32_MAX, UINT64_MAX,
- INT8_MIN, INT8_MAX, INT16_MIN, INT16_MAX,
- INT32_MAX, INT32_MIN, INT64_MAX, INT64_MIN)
+from util cimport UINT8_MAX, UINT64_MAX, INT64_MAX, INT64_MIN
# core.common import for fast inference checks
@@ -331,7 +330,7 @@ def infer_dtype(object value, bint skipna=False):
bint seen_pdnat = False
bint seen_val = False
- if isinstance(value, np.ndarray):
+ if util.is_array(value):
values = value
elif hasattr(value, 'dtype'):
@@ -349,7 +348,7 @@ def infer_dtype(object value, bint skipna=False):
raise ValueError("cannot infer type for {0}".format(type(value)))
else:
- if not isinstance(value, list):
+ if not PyList_Check(value):
value = list(value)
from pandas.core.dtypes.cast import (
construct_1d_object_array_from_listlike)
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 700ba5b6e48f7..a0ac6389c0646 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -306,8 +306,8 @@ class _BaseOffset(object):
def __call__(self, other):
return self.apply(other)
- def __mul__(self, someInt):
- return self.__class__(n=someInt * self.n, normalize=self.normalize,
+ def __mul__(self, other):
+ return self.__class__(n=other * self.n, normalize=self.normalize,
**self.kwds)
def __neg__(self):
@@ -374,8 +374,8 @@ class _BaseOffset(object):
class BaseOffset(_BaseOffset):
# Here we add __rfoo__ methods that don't play well with cdef classes
- def __rmul__(self, someInt):
- return self.__mul__(someInt)
+ def __rmul__(self, other):
+ return self.__mul__(other)
def __radd__(self, other):
return self.__add__(other)
@@ -840,6 +840,8 @@ cpdef int roll_qtrday(datetime other, int n, int month, object day_opt,
-------
n : int number of periods to increment
"""
+ cdef:
+ int months_since
# TODO: Merge this with roll_yearday by setting modby=12 there?
# code de-duplication versus perf hit?
# TODO: with small adjustments this could be used in shift_quarters
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 3ca150cda83c7..9463512ac11de 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1936,10 +1936,6 @@ def _convert_key(self, key, is_setter=False):
return key
-# 32-bit floating point machine epsilon
-_eps = 1.1920929e-07
-
-
def length_of_indexer(indexer, target=None):
"""return the length of a single non-tuple indexer which could be a slice
"""
@@ -1992,19 +1988,6 @@ def convert_to_index_sliceable(obj, key):
return None
-def is_index_slice(obj):
- def _is_valid_index(x):
- return (is_integer(x) or is_float(x) and
- np.allclose(x, int(x), rtol=_eps, atol=0))
-
- def _crit(v):
- return v is None or _is_valid_index(v)
-
- both_none = obj.start is None and obj.stop is None
-
- return not both_none and (_crit(obj.start) and _crit(obj.stop))
-
-
def check_bool_indexer(ax, key):
# boolean indexing, need to check that the data are aligned, otherwise
# disallowed
diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py
index 4b649927f8f72..257b0791e4841 100644
--- a/pandas/core/sparse/series.py
+++ b/pandas/core/sparse/series.py
@@ -19,7 +19,7 @@
from pandas.core import generic
import pandas.core.common as com
import pandas.core.ops as ops
-import pandas._libs.index as _index
+import pandas._libs.index as libindex
from pandas.util._decorators import Appender
from pandas.core.sparse.array import (
@@ -560,7 +560,7 @@ def _set_values(self, key, value):
key = key.values
values = self.values.to_dense()
- values[key] = _index.convert_scalar(values, value)
+ values[key] = libindex.convert_scalar(values, value)
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, self.index)
| Miscellaneous cleanup, use `util.is_array` instead of `isinstance`, that kind of thing.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19360 | 2018-01-23T04:42:09Z | 2018-01-24T01:03:40Z | 2018-01-24T01:03:40Z | 2018-02-11T21:59:35Z |
Added cast blacklist for certain transform agg funcs | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 7f697003f44b9..71492154419fb 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -506,6 +506,7 @@ Groupby/Resample/Rolling
- Fixed regression in :func:`DataFrame.groupby` which would not emit an error when called with a tuple key not in the index (:issue:`18798`)
- Bug in :func:`DataFrame.resample` which silently ignored unsupported (or mistyped) options for ``label``, ``closed`` and ``convention`` (:issue:`19303`)
- Bug in :func:`DataFrame.groupby` where tuples were interpreted as lists of keys rather than as keys (:issue:`17979`, :issue:`18249`)
+- Bug in ``transform`` where particular aggregation functions were being incorrectly cast to match the dtype(s) of the grouped data (:issue:`19200`)
-
Sparse
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index fc7a0faef0cf6..2c1deb9db7bba 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -345,6 +345,8 @@
_cython_transforms = frozenset(['cumprod', 'cumsum', 'shift',
'cummin', 'cummax'])
+_cython_cast_blacklist = frozenset(['rank', 'count', 'size'])
+
class Grouper(object):
"""
@@ -965,6 +967,21 @@ def _try_cast(self, result, obj, numeric_only=False):
return result
+ def _transform_should_cast(self, func_nm):
+ """
+ Parameters:
+ -----------
+ func_nm: str
+ The name of the aggregation function being performed
+
+ Returns:
+ --------
+ bool
+ Whether transform should attempt to cast the result of aggregation
+ """
+ return (self.size().fillna(0) > 0).any() and (func_nm not in
+ _cython_cast_blacklist)
+
def _cython_transform(self, how, numeric_only=True):
output = collections.OrderedDict()
for name, obj in self._iterate_slices():
@@ -3333,7 +3350,7 @@ def transform(self, func, *args, **kwargs):
else:
# cythonized aggregation and merge
return self._transform_fast(
- lambda: getattr(self, func)(*args, **kwargs))
+ lambda: getattr(self, func)(*args, **kwargs), func)
# reg transform
klass = self._selected_obj.__class__
@@ -3364,7 +3381,7 @@ def transform(self, func, *args, **kwargs):
result.index = self._selected_obj.index
return result
- def _transform_fast(self, func):
+ def _transform_fast(self, func, func_nm):
"""
fast version of transform, only applicable to
builtin/cythonizable functions
@@ -3373,7 +3390,7 @@ def _transform_fast(self, func):
func = getattr(self, func)
ids, _, ngroup = self.grouper.group_info
- cast = (self.size().fillna(0) > 0).any()
+ cast = self._transform_should_cast(func_nm)
out = algorithms.take_1d(func().values, ids)
if cast:
out = self._try_cast(out, self.obj)
@@ -4127,15 +4144,15 @@ def transform(self, func, *args, **kwargs):
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
- return self._transform_fast(result, obj)
+ return self._transform_fast(result, obj, func)
- def _transform_fast(self, result, obj):
+ def _transform_fast(self, result, obj, func_nm):
"""
Fast transform path for aggregations
"""
# if there were groups with no observations (Categorical only?)
# try casting data to original dtype
- cast = (self.size().fillna(0) > 0).any()
+ cast = self._transform_should_cast(func_nm)
# for each col, reshape to to size of original frame
# by take operation
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py
index 8f72da293a50c..4159d0f709a13 100644
--- a/pandas/tests/groupby/test_transform.py
+++ b/pandas/tests/groupby/test_transform.py
@@ -582,3 +582,28 @@ def test_transform_with_non_scalar_group(self):
'group.*',
df.groupby(axis=1, level=1).transform,
lambda z: z.div(z.sum(axis=1), axis=0))
+
+ @pytest.mark.parametrize('cols,exp,comp_func', [
+ ('a', pd.Series([1, 1, 1], name='a'), tm.assert_series_equal),
+ (['a', 'c'], pd.DataFrame({'a': [1, 1, 1], 'c': [1, 1, 1]}),
+ tm.assert_frame_equal)
+ ])
+ @pytest.mark.parametrize('agg_func', [
+ 'count', 'rank', 'size'])
+ def test_transform_numeric_ret(self, cols, exp, comp_func, agg_func):
+ if agg_func == 'size' and isinstance(cols, list):
+ pytest.xfail("'size' transformation not supported with "
+ "NDFrameGroupy")
+
+ # GH 19200
+ df = pd.DataFrame(
+ {'a': pd.date_range('2018-01-01', periods=3),
+ 'b': range(3),
+ 'c': range(7, 10)})
+
+ result = df.groupby('b')[cols].transform(agg_func)
+
+ if agg_func == 'rank':
+ exp = exp.astype('float')
+
+ comp_func(result, exp)
| - [X] closes #19200
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19355 | 2018-01-23T01:25:58Z | 2018-01-23T11:00:56Z | 2018-01-23T11:00:56Z | 2018-01-23T17:09:45Z |
TST: Clean up pickle compression tests | diff --git a/pandas/conftest.py b/pandas/conftest.py
index 4cf5c9da44697..4fe66d4cf7e1f 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -4,6 +4,7 @@
import numpy
import pandas
import dateutil
+import pandas.util._test_decorators as td
def pytest_addoption(parser):
@@ -73,3 +74,22 @@ def ip():
is_dateutil_gt_261 = pytest.mark.skipif(
LooseVersion(dateutil.__version__) <= LooseVersion('2.6.1'),
reason="dateutil stable version")
+
+
+@pytest.fixture(params=[None, 'gzip', 'bz2', 'zip',
+ pytest.param('xz', marks=td.skip_if_no_lzma)])
+def compression(request):
+ """
+ Fixture for trying common compression types in compression tests
+ """
+ return request.param
+
+
+@pytest.fixture(params=[None, 'gzip', 'bz2',
+ pytest.param('xz', marks=td.skip_if_no_lzma)])
+def compression_no_zip(request):
+ """
+ Fixture for trying common compression types in compression tests
+ except zip
+ """
+ return request.param
diff --git a/pandas/tests/conftest.py b/pandas/tests/conftest.py
deleted file mode 100644
index 8f5d963927f60..0000000000000
--- a/pandas/tests/conftest.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import pytest
-import pandas.util._test_decorators as td
-
-
-@pytest.fixture(params=[None, 'gzip', 'bz2',
- pytest.param('xz', marks=td.skip_if_no_lzma)])
-def compression(request):
- """
- Fixture for trying common compression types in compression tests
- """
- return request.param
diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py
index d89d57947bde2..a3ba34ae92283 100644
--- a/pandas/tests/frame/test_to_csv.py
+++ b/pandas/tests/frame/test_to_csv.py
@@ -919,7 +919,7 @@ def test_to_csv_path_is_none(self):
recons = pd.read_csv(StringIO(csv_str), index_col=0)
assert_frame_equal(self.frame, recons)
- def test_to_csv_compression(self, compression):
+ def test_to_csv_compression(self, compression_no_zip):
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
@@ -927,19 +927,20 @@ def test_to_csv_compression(self, compression):
with ensure_clean() as filename:
- df.to_csv(filename, compression=compression)
+ df.to_csv(filename, compression=compression_no_zip)
# test the round trip - to_csv -> read_csv
- rs = read_csv(filename, compression=compression, index_col=0)
+ rs = read_csv(filename, compression=compression_no_zip,
+ index_col=0)
assert_frame_equal(df, rs)
# explicitly make sure file is compressed
- with tm.decompress_file(filename, compression) as fh:
+ with tm.decompress_file(filename, compression_no_zip) as fh:
text = fh.read().decode('utf8')
for col in df.columns:
assert col in text
- with tm.decompress_file(filename, compression) as fh:
+ with tm.decompress_file(filename, compression_no_zip) as fh:
assert_frame_equal(df, read_csv(fh, index_col=0))
def test_to_csv_compression_value_error(self):
diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py
index 2cf4c435bdc12..08335293f9292 100644
--- a/pandas/tests/io/json/test_compression.py
+++ b/pandas/tests/io/json/test_compression.py
@@ -5,17 +5,18 @@
from pandas.util.testing import assert_frame_equal, assert_raises_regex
-def test_compression_roundtrip(compression):
+def test_compression_roundtrip(compression_no_zip):
df = pd.DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with tm.ensure_clean() as path:
- df.to_json(path, compression=compression)
- assert_frame_equal(df, pd.read_json(path, compression=compression))
+ df.to_json(path, compression=compression_no_zip)
+ assert_frame_equal(df, pd.read_json(path,
+ compression=compression_no_zip))
# explicitly ensure file was compressed.
- with tm.decompress_file(path, compression) as fh:
+ with tm.decompress_file(path, compression_no_zip) as fh:
result = fh.read().decode('utf8')
assert_frame_equal(df, pd.read_json(result))
@@ -40,7 +41,7 @@ def test_read_zipped_json():
assert_frame_equal(uncompressed_df, compressed_df)
-def test_with_s3_url(compression):
+def test_with_s3_url(compression_no_zip):
boto3 = pytest.importorskip('boto3')
pytest.importorskip('s3fs')
moto = pytest.importorskip('moto')
@@ -51,31 +52,36 @@ def test_with_s3_url(compression):
bucket = conn.create_bucket(Bucket="pandas-test")
with tm.ensure_clean() as path:
- df.to_json(path, compression=compression)
+ df.to_json(path, compression=compression_no_zip)
with open(path, 'rb') as f:
bucket.put_object(Key='test-1', Body=f)
roundtripped_df = pd.read_json('s3://pandas-test/test-1',
- compression=compression)
+ compression=compression_no_zip)
assert_frame_equal(df, roundtripped_df)
-def test_lines_with_compression(compression):
+def test_lines_with_compression(compression_no_zip):
+
with tm.ensure_clean() as path:
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
- df.to_json(path, orient='records', lines=True, compression=compression)
+ df.to_json(path, orient='records', lines=True,
+ compression=compression_no_zip)
roundtripped_df = pd.read_json(path, lines=True,
- compression=compression)
+ compression=compression_no_zip)
assert_frame_equal(df, roundtripped_df)
-def test_chunksize_with_compression(compression):
+def test_chunksize_with_compression(compression_no_zip):
+
with tm.ensure_clean() as path:
df = pd.read_json('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}')
- df.to_json(path, orient='records', lines=True, compression=compression)
+ df.to_json(path, orient='records', lines=True,
+ compression=compression_no_zip)
- roundtripped_df = pd.concat(pd.read_json(path, lines=True, chunksize=1,
- compression=compression))
+ res = pd.read_json(path, lines=True, chunksize=1,
+ compression=compression_no_zip)
+ roundtripped_df = pd.concat(res)
assert_frame_equal(df, roundtripped_df)
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index 5d2ba8e4fa712..2ba3e174404c7 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -352,42 +352,7 @@ def compress_file(self, src_path, dest_path, compression):
f.write(fh.read())
f.close()
- def decompress_file(self, src_path, dest_path, compression):
- if compression is None:
- shutil.copyfile(src_path, dest_path)
- return
-
- if compression == 'gzip':
- import gzip
- f = gzip.open(src_path, "r")
- elif compression == 'bz2':
- import bz2
- f = bz2.BZ2File(src_path, "r")
- elif compression == 'zip':
- import zipfile
- zip_file = zipfile.ZipFile(src_path)
- zip_names = zip_file.namelist()
- if len(zip_names) == 1:
- f = zip_file.open(zip_names.pop())
- else:
- raise ValueError('ZIP file {} error. Only one file per ZIP.'
- .format(src_path))
- elif compression == 'xz':
- lzma = pandas.compat.import_lzma()
- f = lzma.LZMAFile(src_path, "r")
- else:
- msg = 'Unrecognized compression type: {}'.format(compression)
- raise ValueError(msg)
-
- with open(dest_path, "wb") as fh:
- fh.write(f.read())
- f.close()
-
- @pytest.mark.parametrize('compression', [
- None, 'gzip', 'bz2',
- pytest.param('xz', marks=td.skip_if_no_lzma) # issue 11666
- ])
- def test_write_explicit(self, compression, get_random_path):
+ def test_write_explicit(self, compression_no_zip, get_random_path):
base = get_random_path
path1 = base + ".compressed"
path2 = base + ".raw"
@@ -396,10 +361,12 @@ def test_write_explicit(self, compression, get_random_path):
df = tm.makeDataFrame()
# write to compressed file
- df.to_pickle(p1, compression=compression)
+ df.to_pickle(p1, compression=compression_no_zip)
# decompress
- self.decompress_file(p1, p2, compression=compression)
+ with tm.decompress_file(p1, compression=compression_no_zip) as f:
+ with open(p2, "wb") as fh:
+ fh.write(f.read())
# read decompressed file
df2 = pd.read_pickle(p2, compression=None)
@@ -435,17 +402,15 @@ def test_write_infer(self, ext, get_random_path):
df.to_pickle(p1)
# decompress
- self.decompress_file(p1, p2, compression=compression)
+ with tm.decompress_file(p1, compression=compression) as f:
+ with open(p2, "wb") as fh:
+ fh.write(f.read())
# read decompressed file
df2 = pd.read_pickle(p2, compression=None)
tm.assert_frame_equal(df, df2)
- @pytest.mark.parametrize('compression', [
- None, 'gzip', 'bz2', "zip",
- pytest.param('xz', marks=td.skip_if_no_lzma)
- ])
def test_read_explicit(self, compression, get_random_path):
base = get_random_path
path1 = base + ".raw"
diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py
index ec26716f79446..62d1372525cc8 100644
--- a/pandas/tests/series/test_io.py
+++ b/pandas/tests/series/test_io.py
@@ -138,28 +138,29 @@ def test_to_csv_path_is_none(self):
csv_str = s.to_csv(path=None)
assert isinstance(csv_str, str)
- def test_to_csv_compression(self, compression):
+ def test_to_csv_compression(self, compression_no_zip):
s = Series([0.123456, 0.234567, 0.567567], index=['A', 'B', 'C'],
name='X')
with ensure_clean() as filename:
- s.to_csv(filename, compression=compression, header=True)
+ s.to_csv(filename, compression=compression_no_zip, header=True)
# test the round trip - to_csv -> read_csv
- rs = pd.read_csv(filename, compression=compression, index_col=0,
- squeeze=True)
+ rs = pd.read_csv(filename, compression=compression_no_zip,
+ index_col=0, squeeze=True)
assert_series_equal(s, rs)
# explicitly ensure file was compressed
- with tm.decompress_file(filename, compression=compression) as fh:
+ with tm.decompress_file(filename, compression_no_zip) as fh:
text = fh.read().decode('utf8')
assert s.name in text
- with tm.decompress_file(filename, compression=compression) as fh:
+ with tm.decompress_file(filename, compression_no_zip) as fh:
assert_series_equal(s, pd.read_csv(fh,
- index_col=0, squeeze=True))
+ index_col=0,
+ squeeze=True))
class TestSeriesIO(TestData):
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 3a06f6244da14..34e634f56aec6 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -191,6 +191,15 @@ def decompress_file(path, compression):
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.LZMAFile(path, 'rb')
+ elif compression == 'zip':
+ import zipfile
+ zip_file = zipfile.ZipFile(path)
+ zip_names = zip_file.namelist()
+ if len(zip_names) == 1:
+ f = zip_file.open(zip_names.pop())
+ else:
+ raise ValueError('ZIP file {} error. Only one file per ZIP.'
+ .format(path))
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
| xref #19226
Move compression fixture to top-level ``conftest.py``
Clean-up compression tests in ``pandas/tests/io/test_pickle.py`` and add zip to the ``decompress_file`` testing utility
Make some minor adjustments to the json and csv compression tests to skip zip compression when it's not valid
| https://api.github.com/repos/pandas-dev/pandas/pulls/19350 | 2018-01-22T20:37:09Z | 2018-01-24T10:06:44Z | 2018-01-24T10:06:44Z | 2018-01-24T10:06:52Z |
clarify redirection in ops | diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index e0aa0a4a415e1..3db2dd849ccee 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -41,6 +41,297 @@
ABCIndex,
ABCPeriodIndex)
+
+def _gen_eval_kwargs(name):
+ """
+ Find the keyword arguments to pass to numexpr for the given operation.
+
+ Parameters
+ ----------
+ name : str
+
+ Returns
+ -------
+ eval_kwargs : dict
+
+ Examples
+ --------
+ >>> _gen_eval_kwargs("__add__")
+ {}
+
+ >>> _gen_eval_kwargs("rtruediv")
+ {"reversed": True, "truediv": True}
+ """
+ kwargs = {}
+
+ # Series and Panel appear to only pass __add__, __radd__, ...
+ # but DataFrame gets both these dunder names _and_ non-dunder names
+ # add, radd, ...
+ name = name.replace('__', '')
+
+ if name.startswith('r'):
+ if name not in ['radd', 'rand', 'ror', 'rxor']:
+ # Exclude commutative operations
+ kwargs['reversed'] = True
+
+ if name in ['truediv', 'rtruediv']:
+ kwargs['truediv'] = True
+
+ if name in ['ne']:
+ kwargs['masker'] = True
+
+ return kwargs
+
+
+def _gen_fill_zeros(name):
+ """
+ Find the appropriate fill value to use when filling in undefined values
+ in the results of the given operation caused by operating on
+ (generally dividing by) zero.
+
+ Parameters
+ ----------
+ name : str
+
+ Returns
+ -------
+ fill_value : {None, np.nan, np.inf}
+ """
+ name = name.strip('__')
+ if 'div' in name:
+ # truediv, floordiv, div, and reversed variants
+ fill_value = np.inf
+ elif 'mod' in name:
+ # mod, rmod
+ fill_value = np.nan
+ else:
+ fill_value = None
+ return fill_value
+
+
+# -----------------------------------------------------------------------------
+# Docstring Generation and Templates
+
+_op_descriptions = {
+ 'add': {'op': '+',
+ 'desc': 'Addition',
+ 'reversed': False,
+ 'reverse': 'radd'},
+ 'sub': {'op': '-',
+ 'desc': 'Subtraction',
+ 'reversed': False,
+ 'reverse': 'rsub'},
+ 'mul': {'op': '*',
+ 'desc': 'Multiplication',
+ 'reversed': False,
+ 'reverse': 'rmul'},
+ 'mod': {'op': '%',
+ 'desc': 'Modulo',
+ 'reversed': False,
+ 'reverse': 'rmod'},
+ 'pow': {'op': '**',
+ 'desc': 'Exponential power',
+ 'reversed': False,
+ 'reverse': 'rpow'},
+ 'truediv': {'op': '/',
+ 'desc': 'Floating division',
+ 'reversed': False,
+ 'reverse': 'rtruediv'},
+ 'floordiv': {'op': '//',
+ 'desc': 'Integer division',
+ 'reversed': False,
+ 'reverse': 'rfloordiv'},
+ 'divmod': {'op': 'divmod',
+ 'desc': 'Integer division and modulo',
+ 'reversed': False,
+ 'reverse': None},
+
+ 'eq': {'op': '==',
+ 'desc': 'Equal to',
+ 'reversed': False,
+ 'reverse': None},
+ 'ne': {'op': '!=',
+ 'desc': 'Not equal to',
+ 'reversed': False,
+ 'reverse': None},
+ 'lt': {'op': '<',
+ 'desc': 'Less than',
+ 'reversed': False,
+ 'reverse': None},
+ 'le': {'op': '<=',
+ 'desc': 'Less than or equal to',
+ 'reversed': False,
+ 'reverse': None},
+ 'gt': {'op': '>',
+ 'desc': 'Greater than',
+ 'reversed': False,
+ 'reverse': None},
+ 'ge': {'op': '>=',
+ 'desc': 'Greater than or equal to',
+ 'reversed': False,
+ 'reverse': None}}
+
+_op_names = list(_op_descriptions.keys())
+for key in _op_names:
+ reverse_op = _op_descriptions[key]['reverse']
+ if reverse_op is not None:
+ _op_descriptions[reverse_op] = _op_descriptions[key].copy()
+ _op_descriptions[reverse_op]['reversed'] = True
+ _op_descriptions[reverse_op]['reverse'] = key
+
+_flex_doc_SERIES = """
+{desc} of series and other, element-wise (binary operator `{op_name}`).
+
+Equivalent to ``{equiv}``, but with support to substitute a fill_value for
+missing data in one of the inputs.
+
+Parameters
+----------
+other : Series or scalar value
+fill_value : None or float value, default None (NaN)
+ Fill missing (NaN) values with this value. If both Series are
+ missing, the result will be missing
+level : int or name
+ Broadcast across a level, matching Index values on the
+ passed MultiIndex level
+
+Returns
+-------
+result : Series
+
+See also
+--------
+Series.{reverse}
+"""
+
+_arith_doc_FRAME = """
+Binary operator %s with support to substitute a fill_value for missing data in
+one of the inputs
+
+Parameters
+----------
+other : Series, DataFrame, or constant
+axis : {0, 1, 'index', 'columns'}
+ For Series input, axis to match Series index on
+fill_value : None or float value, default None
+ Fill missing (NaN) values with this value. If both DataFrame locations are
+ missing, the result will be missing
+level : int or name
+ Broadcast across a level, matching Index values on the
+ passed MultiIndex level
+
+Notes
+-----
+Mismatched indices will be unioned together
+
+Returns
+-------
+result : DataFrame
+"""
+
+_flex_doc_FRAME = """
+{desc} of dataframe and other, element-wise (binary operator `{op_name}`).
+
+Equivalent to ``{equiv}``, but with support to substitute a fill_value for
+missing data in one of the inputs.
+
+Parameters
+----------
+other : Series, DataFrame, or constant
+axis : {{0, 1, 'index', 'columns'}}
+ For Series input, axis to match Series index on
+fill_value : None or float value, default None
+ Fill missing (NaN) values with this value. If both DataFrame
+ locations are missing, the result will be missing
+level : int or name
+ Broadcast across a level, matching Index values on the
+ passed MultiIndex level
+
+Notes
+-----
+Mismatched indices will be unioned together
+
+Returns
+-------
+result : DataFrame
+
+See also
+--------
+DataFrame.{reverse}
+"""
+
+_flex_doc_PANEL = """
+{desc} of series and other, element-wise (binary operator `{op_name}`).
+Equivalent to ``{equiv}``.
+
+Parameters
+----------
+other : DataFrame or Panel
+axis : {{items, major_axis, minor_axis}}
+ Axis to broadcast over
+
+Returns
+-------
+Panel
+
+See also
+--------
+Panel.{reverse}
+"""
+
+
+_agg_doc_PANEL = """
+Wrapper method for {wrp_method}
+
+Parameters
+----------
+other : {construct} or {cls_name}
+axis : {{{axis_order}}}
+ Axis to broadcast over
+
+Returns
+-------
+{cls_name}
+"""
+
+
+def _make_flex_doc(op_name, typ):
+ """
+ Make the appropriate substitutions for the given operation and class-typ
+ into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring
+ to attach to a generated method.
+
+ Parameters
+ ----------
+ op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...}
+ typ : str {series, 'dataframe']}
+
+ Returns
+ -------
+ doc : str
+ """
+ op_name = op_name.replace('__', '')
+ op_desc = _op_descriptions[op_name]
+
+ if op_desc['reversed']:
+ equiv = 'other ' + op_desc['op'] + ' ' + typ
+ else:
+ equiv = typ + ' ' + op_desc['op'] + ' other'
+
+ if typ == 'series':
+ base_doc = _flex_doc_SERIES
+ elif typ == 'dataframe':
+ base_doc = _flex_doc_FRAME
+ elif typ == 'panel':
+ base_doc = _flex_doc_PANEL
+ else:
+ raise AssertionError('Invalid typ argument.')
+
+ doc = base_doc.format(desc=op_desc['desc'], op_name=op_name,
+ equiv=equiv, reverse=op_desc['reverse'])
+ return doc
+
+
# -----------------------------------------------------------------------------
# Functions that add arithmetic methods to objects, given arithmetic factory
# methods
@@ -82,35 +373,31 @@ def names(x):
mul=arith_method(operator.mul, names('mul'), op('*'),
default_axis=default_axis),
truediv=arith_method(operator.truediv, names('truediv'), op('/'),
- truediv=True, fill_zeros=np.inf,
default_axis=default_axis),
floordiv=arith_method(operator.floordiv, names('floordiv'), op('//'),
- default_axis=default_axis, fill_zeros=np.inf),
+ default_axis=default_axis),
# Causes a floating point exception in the tests when numexpr enabled,
# so for now no speedup
mod=arith_method(operator.mod, names('mod'), None,
- default_axis=default_axis, fill_zeros=np.nan),
+ default_axis=default_axis),
pow=arith_method(operator.pow, names('pow'), op('**'),
default_axis=default_axis),
# not entirely sure why this is necessary, but previously was included
# so it's here to maintain compatibility
rmul=arith_method(operator.mul, names('rmul'), op('*'),
- default_axis=default_axis, reversed=True),
+ default_axis=default_axis),
rsub=arith_method(lambda x, y: y - x, names('rsub'), op('-'),
- default_axis=default_axis, reversed=True),
+ default_axis=default_axis),
rtruediv=arith_method(lambda x, y: operator.truediv(y, x),
- names('rtruediv'), op('/'), truediv=True,
- fill_zeros=np.inf, default_axis=default_axis,
- reversed=True),
+ names('rtruediv'), op('/'),
+ default_axis=default_axis),
rfloordiv=arith_method(lambda x, y: operator.floordiv(y, x),
names('rfloordiv'), op('//'),
- default_axis=default_axis, fill_zeros=np.inf,
- reversed=True),
+ default_axis=default_axis),
rpow=arith_method(lambda x, y: y**x, names('rpow'), op('**'),
- default_axis=default_axis, reversed=True),
+ default_axis=default_axis),
rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%'),
- default_axis=default_axis, fill_zeros=np.nan,
- reversed=True),)
+ default_axis=default_axis))
# yapf: enable
new_methods['div'] = new_methods['truediv']
new_methods['rdiv'] = new_methods['rtruediv']
@@ -119,11 +406,11 @@ def names(x):
if comp_method:
new_methods.update(dict(
eq=comp_method(operator.eq, names('eq'), op('==')),
- ne=comp_method(operator.ne, names('ne'), op('!='), masker=True),
+ ne=comp_method(operator.ne, names('ne'), op('!=')),
lt=comp_method(operator.lt, names('lt'), op('<')),
gt=comp_method(operator.gt, names('gt'), op('>')),
le=comp_method(operator.le, names('le'), op('<=')),
- ge=comp_method(operator.ge, names('ge'), op('>=')), ))
+ ge=comp_method(operator.ge, names('ge'), op('>='))))
if bool_method:
new_methods.update(
dict(and_=bool_method(operator.and_, names('and_'), op('&')),
@@ -138,13 +425,10 @@ def names(x):
names('rxor'), op('^'))))
if have_divmod:
# divmod doesn't have an op that is supported by numexpr
- new_methods['divmod'] = arith_method(
- divmod,
- names('divmod'),
- None,
- default_axis=default_axis,
- construct_result=_construct_divmod_result,
- )
+ new_methods['divmod'] = arith_method(divmod,
+ names('divmod'),
+ None,
+ default_axis=default_axis)
new_methods = {names(k): v for k, v in new_methods.items()}
return new_methods
@@ -170,7 +454,7 @@ def add_special_arithmetic_methods(cls, arith_method=None,
----------
arith_method : function (optional)
factory for special arithmetic methods, with op string:
- f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs)
+ f(op, name, str_rep, default_axis=None)
comp_method : function (optional)
factory for rich comparison - signature: f(op, name, str_rep)
bool_method : function (optional)
@@ -242,7 +526,7 @@ def add_flex_arithmetic_methods(cls, flex_arith_method,
----------
flex_arith_method : function
factory for special arithmetic methods, with op string:
- f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs)
+ f(op, name, str_rep, default_axis=None)
flex_comp_method : function, optional,
factory for rich comparison - signature: f(op, name, str_rep)
use_numexpr : bool, default True
@@ -267,6 +551,9 @@ def add_flex_arithmetic_methods(cls, flex_arith_method,
add_methods(cls, new_methods=new_methods, force=force)
+# -----------------------------------------------------------------------------
+# Series
+
def _align_method_SERIES(left, right, align_asobject=False):
""" align lhs and rhs Series """
@@ -310,12 +597,16 @@ def _construct_divmod_result(left, result, index, name, dtype):
)
-def _arith_method_SERIES(op, name, str_rep, fill_zeros=None, default_axis=None,
- construct_result=_construct_result, **eval_kwargs):
+def _arith_method_SERIES(op, name, str_rep, default_axis=None):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
+ eval_kwargs = _gen_eval_kwargs(name)
+ fill_zeros = _gen_fill_zeros(name)
+ construct_result = (_construct_divmod_result
+ if op is divmod else _construct_result)
+
def na_op(x, y):
import pandas.core.computation.expressions as expressions
@@ -448,11 +739,12 @@ def _comp_method_OBJECT_ARRAY(op, x, y):
return result
-def _comp_method_SERIES(op, name, str_rep, masker=False):
+def _comp_method_SERIES(op, name, str_rep):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
+ masker = _gen_eval_kwargs(name).get('masker', False)
def na_op(x, y):
@@ -641,109 +933,8 @@ def wrapper(self, other):
return wrapper
-_op_descriptions = {'add': {'op': '+',
- 'desc': 'Addition',
- 'reversed': False,
- 'reverse': 'radd'},
- 'sub': {'op': '-',
- 'desc': 'Subtraction',
- 'reversed': False,
- 'reverse': 'rsub'},
- 'mul': {'op': '*',
- 'desc': 'Multiplication',
- 'reversed': False,
- 'reverse': 'rmul'},
- 'mod': {'op': '%',
- 'desc': 'Modulo',
- 'reversed': False,
- 'reverse': 'rmod'},
- 'pow': {'op': '**',
- 'desc': 'Exponential power',
- 'reversed': False,
- 'reverse': 'rpow'},
- 'truediv': {'op': '/',
- 'desc': 'Floating division',
- 'reversed': False,
- 'reverse': 'rtruediv'},
- 'floordiv': {'op': '//',
- 'desc': 'Integer division',
- 'reversed': False,
- 'reverse': 'rfloordiv'},
- 'divmod': {'op': 'divmod',
- 'desc': 'Integer division and modulo',
- 'reversed': False,
- 'reverse': None},
-
- 'eq': {'op': '==',
- 'desc': 'Equal to',
- 'reversed': False,
- 'reverse': None},
- 'ne': {'op': '!=',
- 'desc': 'Not equal to',
- 'reversed': False,
- 'reverse': None},
- 'lt': {'op': '<',
- 'desc': 'Less than',
- 'reversed': False,
- 'reverse': None},
- 'le': {'op': '<=',
- 'desc': 'Less than or equal to',
- 'reversed': False,
- 'reverse': None},
- 'gt': {'op': '>',
- 'desc': 'Greater than',
- 'reversed': False,
- 'reverse': None},
- 'ge': {'op': '>=',
- 'desc': 'Greater than or equal to',
- 'reversed': False,
- 'reverse': None}}
-
-_op_names = list(_op_descriptions.keys())
-for k in _op_names:
- reverse_op = _op_descriptions[k]['reverse']
- _op_descriptions[reverse_op] = _op_descriptions[k].copy()
- _op_descriptions[reverse_op]['reversed'] = True
- _op_descriptions[reverse_op]['reverse'] = k
-
-
-_flex_doc_SERIES = """
-%s of series and other, element-wise (binary operator `%s`).
-
-Equivalent to ``%s``, but with support to substitute a fill_value for
-missing data in one of the inputs.
-
-Parameters
-----------
-other : Series or scalar value
-fill_value : None or float value, default None (NaN)
- Fill missing (NaN) values with this value. If both Series are
- missing, the result will be missing
-level : int or name
- Broadcast across a level, matching Index values on the
- passed MultiIndex level
-
-Returns
--------
-result : Series
-
-See also
---------
-Series.%s
-"""
-
-
-def _flex_method_SERIES(op, name, str_rep, default_axis=None, fill_zeros=None,
- **eval_kwargs):
- op_name = name.replace('__', '')
- op_desc = _op_descriptions[op_name]
- if op_desc['reversed']:
- equiv = 'other ' + op_desc['op'] + ' series'
- else:
- equiv = 'series ' + op_desc['op'] + ' other'
-
- doc = _flex_doc_SERIES % (op_desc['desc'], op_name, equiv,
- op_desc['reverse'])
+def _flex_method_SERIES(op, name, str_rep, default_axis=None):
+ doc = _make_flex_doc(name, 'series')
@Appender(doc)
def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
@@ -776,62 +967,9 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
bool_method=_bool_method_SERIES,
have_divmod=True)
-_arith_doc_FRAME = """
-Binary operator %s with support to substitute a fill_value for missing data in
-one of the inputs
-
-Parameters
-----------
-other : Series, DataFrame, or constant
-axis : {0, 1, 'index', 'columns'}
- For Series input, axis to match Series index on
-fill_value : None or float value, default None
- Fill missing (NaN) values with this value. If both DataFrame locations are
- missing, the result will be missing
-level : int or name
- Broadcast across a level, matching Index values on the
- passed MultiIndex level
-
-Notes
------
-Mismatched indices will be unioned together
-
-Returns
--------
-result : DataFrame
-"""
-
-_flex_doc_FRAME = """
-%s of dataframe and other, element-wise (binary operator `%s`).
-
-Equivalent to ``%s``, but with support to substitute a fill_value for
-missing data in one of the inputs.
-
-Parameters
-----------
-other : Series, DataFrame, or constant
-axis : {0, 1, 'index', 'columns'}
- For Series input, axis to match Series index on
-fill_value : None or float value, default None
- Fill missing (NaN) values with this value. If both DataFrame
- locations are missing, the result will be missing
-level : int or name
- Broadcast across a level, matching Index values on the
- passed MultiIndex level
-
-Notes
------
-Mismatched indices will be unioned together
-
-Returns
--------
-result : DataFrame
-
-See also
---------
-DataFrame.%s
-"""
+# -----------------------------------------------------------------------------
+# DataFrame
def _align_method_FRAME(left, right, axis):
""" convert rhs to meet lhs dims if input is list, tuple or np.ndarray """
@@ -877,8 +1015,10 @@ def to_series(right):
return right
-def _arith_method_FRAME(op, name, str_rep=None, default_axis='columns',
- fill_zeros=None, **eval_kwargs):
+def _arith_method_FRAME(op, name, str_rep=None, default_axis='columns'):
+ eval_kwargs = _gen_eval_kwargs(name)
+ fill_zeros = _gen_fill_zeros(name)
+
def na_op(x, y):
import pandas.core.computation.expressions as expressions
@@ -923,15 +1063,8 @@ def na_op(x, y):
return result
if name in _op_descriptions:
- op_name = name.replace('__', '')
- op_desc = _op_descriptions[op_name]
- if op_desc['reversed']:
- equiv = 'other ' + op_desc['op'] + ' dataframe'
- else:
- equiv = 'dataframe ' + op_desc['op'] + ' other'
-
- doc = _flex_doc_FRAME % (op_desc['desc'], op_name, equiv,
- op_desc['reverse'])
+ # i.e. include "add" but not "__add__"
+ doc = _make_flex_doc(name, 'dataframe')
else:
doc = _arith_doc_FRAME % name
@@ -955,9 +1088,8 @@ def f(self, other, axis=default_axis, level=None, fill_value=None):
return f
-# Masker unused for now
-def _flex_comp_method_FRAME(op, name, str_rep=None, default_axis='columns',
- masker=False):
+def _flex_comp_method_FRAME(op, name, str_rep=None, default_axis='columns'):
+
def na_op(x, y):
try:
with np.errstate(invalid='ignore'):
@@ -1003,7 +1135,7 @@ def f(self, other, axis=default_axis, level=None):
return f
-def _comp_method_FRAME(func, name, str_rep, masker=False):
+def _comp_method_FRAME(func, name, str_rep):
@Appender('Wrapper for comparison method {name}'.format(name=name))
def f(self, other):
if isinstance(other, ABCDataFrame): # Another DataFrame
@@ -1032,8 +1164,10 @@ def f(self, other):
bool_method=_arith_method_FRAME)
-def _arith_method_PANEL(op, name, str_rep=None, fill_zeros=None,
- default_axis=None, **eval_kwargs):
+# -----------------------------------------------------------------------------
+# Panel
+
+def _arith_method_PANEL(op, name, str_rep=None, default_axis=None):
# work only for scalars
def f(self, other):
@@ -1048,7 +1182,7 @@ def f(self, other):
return f
-def _comp_method_PANEL(op, name, str_rep=None, masker=False):
+def _comp_method_PANEL(op, name, str_rep=None):
def na_op(x, y):
import pandas.core.computation.expressions as expressions
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index ae86074ce2d05..afdd9bae3006f 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -1525,8 +1525,11 @@ def _extract_axis(self, data, axis=0, intersect=False):
def _add_aggregate_operations(cls, use_numexpr=True):
""" add the operations to the cls; evaluate the doc strings again """
- def _panel_arith_method(op, name, str_rep=None, default_axis=None,
- fill_zeros=None, **eval_kwargs):
+ def _panel_arith_method(op, name, str_rep=None, default_axis=None):
+
+ eval_kwargs = ops._gen_eval_kwargs(name)
+ fill_zeros = ops._gen_fill_zeros(name)
+
def na_op(x, y):
import pandas.core.computation.expressions as expressions
@@ -1544,50 +1547,10 @@ def na_op(x, y):
return result
if name in ops._op_descriptions:
- op_name = name.replace('__', '')
- op_desc = ops._op_descriptions[op_name]
- if op_desc['reversed']:
- equiv = 'other ' + op_desc['op'] + ' panel'
- else:
- equiv = 'panel ' + op_desc['op'] + ' other'
-
- _op_doc = """
-{desc} of series and other, element-wise (binary operator `{op_name}`).
-Equivalent to ``{equiv}``.
-
-Parameters
-----------
-other : {construct} or {cls_name}
-axis : {{{axis_order}}}
- Axis to broadcast over
-
-Returns
--------
-{cls_name}
-
-See also
---------
-{cls_name}.{reverse}\n"""
- doc = _op_doc.format(
- desc=op_desc['desc'], op_name=op_name, equiv=equiv,
- construct=cls._constructor_sliced.__name__,
- cls_name=cls.__name__, reverse=op_desc['reverse'],
- axis_order=', '.join(cls._AXIS_ORDERS))
+ doc = ops._make_flex_doc(name, 'panel')
else:
# doc strings substitors
- _agg_doc = """
- Wrapper method for {wrp_method}
-
- Parameters
- ----------
- other : {construct} or {cls_name}
- axis : {{{axis_order}}}
- Axis to broadcast over
-
- Returns
- -------
- {cls_name}\n"""
- doc = _agg_doc.format(
+ doc = ops._agg_doc_PANEL.format(
construct=cls._constructor_sliced.__name__,
cls_name=cls.__name__, wrp_method=name,
axis_order=', '.join(cls._AXIS_ORDERS))
diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py
index 9b2650359bf68..059e399593971 100644
--- a/pandas/core/sparse/array.py
+++ b/pandas/core/sparse/array.py
@@ -43,8 +43,7 @@
_sparray_doc_kwargs = dict(klass='SparseArray')
-def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
- **eval_kwargs):
+def _arith_method_SPARSE_ARRAY(op, name, str_rep=None, default_axis=None):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
@@ -864,7 +863,8 @@ def _make_index(length, indices, kind):
return index
-ops.add_special_arithmetic_methods(SparseArray, arith_method=_arith_method,
- comp_method=_arith_method,
- bool_method=_arith_method,
+ops.add_special_arithmetic_methods(SparseArray,
+ arith_method=_arith_method_SPARSE_ARRAY,
+ comp_method=_arith_method_SPARSE_ARRAY,
+ bool_method=_arith_method_SPARSE_ARRAY,
use_numexpr=False)
diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py
index 4b649927f8f72..3506284161660 100644
--- a/pandas/core/sparse/series.py
+++ b/pandas/core/sparse/series.py
@@ -41,13 +41,12 @@
# Wrapper function for Series arithmetic methods
-def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
- **eval_kwargs):
+def _arith_method_SPARSE_SERIES(op, name, str_rep=None, default_axis=None):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
- str_rep, default_axis, fill_zeros and eval_kwargs are not used, but are
+ str_rep and default_axis are not used, but are
present for compatibility.
"""
@@ -864,7 +863,8 @@ def from_coo(cls, A, dense_index=False):
**ops.series_flex_funcs)
# overwrite basic arithmetic to use SparseSeries version
# force methods to overwrite previous definitions.
-ops.add_special_arithmetic_methods(SparseSeries, _arith_method,
- comp_method=_arith_method,
+ops.add_special_arithmetic_methods(SparseSeries,
+ arith_method=_arith_method_SPARSE_SERIES,
+ comp_method=_arith_method_SPARSE_SERIES,
bool_method=None, use_numexpr=False,
force=True)
| core.ops involves a _lot_ of redirection. This decreases some of that redirection (and some redundancy) by implementing functions that show the logic of how some of the args/kwargs are chosen. The goal is to make it so future debugging can be done without going through `func.im_func.func_closure[1].cell_contents.func_closure...`
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19346 | 2018-01-22T16:53:32Z | 2018-01-25T11:59:05Z | 2018-01-25T11:59:05Z | 2018-02-11T22:00:13Z |
LINT: Adding scripts directory to lint, and fixing flake issues on them (#18949) | diff --git a/ci/lint.sh b/ci/lint.sh
index 98b33c0803d90..49bf9a690b990 100755
--- a/ci/lint.sh
+++ b/ci/lint.sh
@@ -30,6 +30,13 @@ if [ "$LINT" ]; then
fi
echo "Linting asv_bench/benchmarks/*.py DONE"
+ echo "Linting scripts/*.py"
+ flake8 scripts --filename=*.py
+ if [ $? -ne "0" ]; then
+ RET=1
+ fi
+ echo "Linting scripts/*.py DONE"
+
echo "Linting *.pyx"
flake8 pandas --filename=*.pyx --select=E501,E302,E203,E111,E114,E221,E303,E128,E231,E126,E265,E305,E301,E127,E261,E271,E129,W291,E222,E241,E123,F403
if [ $? -ne "0" ]; then
diff --git a/scripts/announce.py b/scripts/announce.py
old mode 100644
new mode 100755
diff --git a/scripts/api_rst_coverage.py b/scripts/api_rst_coverage.py
index 28e761ef256d0..4800e80d82891 100755
--- a/scripts/api_rst_coverage.py
+++ b/scripts/api_rst_coverage.py
@@ -17,9 +17,11 @@
$ PYTHONPATH=.. ./api_rst_coverage.py
"""
-import pandas as pd
-import inspect
+import os
import re
+import inspect
+import pandas as pd
+
def main():
# classes whose members to check
@@ -61,13 +63,17 @@ def add_notes(x):
# class members
class_members = set()
for cls in classes:
- class_members.update([cls.__name__ + '.' + x[0] for x in inspect.getmembers(cls)])
+ for member in inspect.getmembers(cls):
+ class_members.add('{cls}.{member}'.format(cls=cls.__name__,
+ member=member[0]))
# class members referenced in api.rst
api_rst_members = set()
- file_name = '../doc/source/api.rst'
- with open(file_name, 'r') as f:
- pattern = re.compile('({})\.(\w+)'.format('|'.join(cls.__name__ for cls in classes)))
+ base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ api_rst_fname = os.path.join(base_path, 'doc', 'source', 'api.rst')
+ class_names = (cls.__name__ for cls in classes)
+ pattern = re.compile('({})\.(\w+)'.format('|'.join(class_names)))
+ with open(api_rst_fname, 'r') as f:
for line in f:
match = pattern.search(line)
if match:
@@ -75,7 +81,8 @@ def add_notes(x):
print()
print("Documented members in api.rst that aren't actual class members:")
- for x in sorted(api_rst_members.difference(class_members), key=class_name_sort_key):
+ for x in sorted(api_rst_members.difference(class_members),
+ key=class_name_sort_key):
print(x)
print()
@@ -86,5 +93,6 @@ def add_notes(x):
if '._' not in x:
print(add_notes(x))
+
if __name__ == "__main__":
main()
diff --git a/scripts/build_dist_for_release.sh b/scripts/build_dist_for_release.sh
old mode 100644
new mode 100755
diff --git a/scripts/convert_deps.py b/scripts/convert_deps.py
old mode 100644
new mode 100755
diff --git a/scripts/find_commits_touching_func.py b/scripts/find_commits_touching_func.py
index 0dd609417d7ba..29eb4161718ff 100755
--- a/scripts/find_commits_touching_func.py
+++ b/scripts/find_commits_touching_func.py
@@ -1,135 +1,148 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-
# copyright 2013, y-p @ github
-
-from __future__ import print_function
-from pandas.compat import range, lrange, map, string_types, text_type
-
-"""Search the git history for all commits touching a named method
+"""
+Search the git history for all commits touching a named method
You need the sh module to run this
-WARNING: this script uses git clean -f, running it on a repo with untracked files
-will probably erase them.
+WARNING: this script uses git clean -f, running it on a repo with untracked
+files will probably erase them.
+
+Usage::
+ $ ./find_commits_touching_func.py (see arguments below)
"""
+from __future__ import print_function
import logging
import re
import os
+import argparse
from collections import namedtuple
-from pandas.compat import parse_date
-
+from pandas.compat import lrange, map, string_types, text_type, parse_date
try:
import sh
except ImportError:
- raise ImportError("The 'sh' package is required in order to run this script. ")
+ raise ImportError("The 'sh' package is required to run this script.")
-import argparse
desc = """
Find all commits touching a specified function across the codebase.
""".strip()
argparser = argparse.ArgumentParser(description=desc)
argparser.add_argument('funcname', metavar='FUNCNAME',
- help='Name of function/method to search for changes on.')
+ help='Name of function/method to search for changes on')
argparser.add_argument('-f', '--file-masks', metavar='f_re(,f_re)*',
default=["\.py.?$"],
- help='comma separated list of regexes to match filenames against\n'+
- 'defaults all .py? files')
+ help='comma separated list of regexes to match '
+ 'filenames against\ndefaults all .py? files')
argparser.add_argument('-d', '--dir-masks', metavar='d_re(,d_re)*',
default=[],
- help='comma separated list of regexes to match base path against')
+ help='comma separated list of regexes to match base '
+ 'path against')
argparser.add_argument('-p', '--path-masks', metavar='p_re(,p_re)*',
default=[],
- help='comma separated list of regexes to match full file path against')
+ help='comma separated list of regexes to match full '
+ 'file path against')
argparser.add_argument('-y', '--saw-the-warning',
- action='store_true',default=False,
- help='must specify this to run, acknowledge you realize this will erase untracked files')
+ action='store_true', default=False,
+ help='must specify this to run, acknowledge you '
+ 'realize this will erase untracked files')
argparser.add_argument('--debug-level',
default="CRITICAL",
- help='debug level of messages (DEBUG,INFO,etc...)')
-
+ help='debug level of messages (DEBUG, INFO, etc...)')
args = argparser.parse_args()
lfmt = logging.Formatter(fmt='%(levelname)-8s %(message)s',
- datefmt='%m-%d %H:%M:%S'
-)
-
+ datefmt='%m-%d %H:%M:%S')
shh = logging.StreamHandler()
shh.setFormatter(lfmt)
-
-logger=logging.getLogger("findit")
+logger = logging.getLogger("findit")
logger.addHandler(shh)
+Hit = namedtuple("Hit", "commit path")
+HASH_LEN = 8
-Hit=namedtuple("Hit","commit path")
-HASH_LEN=8
def clean_checkout(comm):
- h,s,d = get_commit_vitals(comm)
+ h, s, d = get_commit_vitals(comm)
if len(s) > 60:
s = s[:60] + "..."
- s=s.split("\n")[0]
- logger.info("CO: %s %s" % (comm,s ))
+ s = s.split("\n")[0]
+ logger.info("CO: %s %s" % (comm, s))
- sh.git('checkout', comm ,_tty_out=False)
+ sh.git('checkout', comm, _tty_out=False)
sh.git('clean', '-f')
-def get_hits(defname,files=()):
- cs=set()
+
+def get_hits(defname, files=()):
+ cs = set()
for f in files:
try:
- r=sh.git('blame', '-L', '/def\s*{start}/,/def/'.format(start=defname),f,_tty_out=False)
+ r = sh.git('blame',
+ '-L',
+ '/def\s*{start}/,/def/'.format(start=defname),
+ f,
+ _tty_out=False)
except sh.ErrorReturnCode_128:
logger.debug("no matches in %s" % f)
continue
lines = r.strip().splitlines()[:-1]
# remove comment lines
- lines = [x for x in lines if not re.search("^\w+\s*\(.+\)\s*#",x)]
- hits = set(map(lambda x: x.split(" ")[0],lines))
- cs.update(set(Hit(commit=c,path=f) for c in hits))
+ lines = [x for x in lines if not re.search("^\w+\s*\(.+\)\s*#", x)]
+ hits = set(map(lambda x: x.split(" ")[0], lines))
+ cs.update(set(Hit(commit=c, path=f) for c in hits))
return cs
-def get_commit_info(c,fmt,sep='\t'):
- r=sh.git('log', "--format={}".format(fmt), '{}^..{}'.format(c,c),"-n","1",_tty_out=False)
+
+def get_commit_info(c, fmt, sep='\t'):
+ r = sh.git('log',
+ "--format={}".format(fmt),
+ '{}^..{}'.format(c, c),
+ "-n",
+ "1",
+ _tty_out=False)
return text_type(r).split(sep)
-def get_commit_vitals(c,hlen=HASH_LEN):
- h,s,d= get_commit_info(c,'%H\t%s\t%ci',"\t")
- return h[:hlen],s,parse_date(d)
-def file_filter(state,dirname,fnames):
- if args.dir_masks and not any(re.search(x,dirname) for x in args.dir_masks):
+def get_commit_vitals(c, hlen=HASH_LEN):
+ h, s, d = get_commit_info(c, '%H\t%s\t%ci', "\t")
+ return h[:hlen], s, parse_date(d)
+
+
+def file_filter(state, dirname, fnames):
+ if (args.dir_masks and
+ not any(re.search(x, dirname) for x in args.dir_masks)):
return
for f in fnames:
- p = os.path.abspath(os.path.join(os.path.realpath(dirname),f))
- if any(re.search(x,f) for x in args.file_masks)\
- or any(re.search(x,p) for x in args.path_masks):
+ p = os.path.abspath(os.path.join(os.path.realpath(dirname), f))
+ if (any(re.search(x, f) for x in args.file_masks) or
+ any(re.search(x, p) for x in args.path_masks)):
if os.path.isfile(p):
state['files'].append(p)
-def search(defname,head_commit="HEAD"):
- HEAD,s = get_commit_vitals("HEAD")[:2]
- logger.info("HEAD at %s: %s" % (HEAD,s))
+
+def search(defname, head_commit="HEAD"):
+ HEAD, s = get_commit_vitals("HEAD")[:2]
+ logger.info("HEAD at %s: %s" % (HEAD, s))
done_commits = set()
# allhits = set()
files = []
state = dict(files=files)
- os.path.walk('.',file_filter,state)
+ os.walk('.', file_filter, state)
# files now holds a list of paths to files
# seed with hits from q
- allhits= set(get_hits(defname, files = files))
+ allhits = set(get_hits(defname, files=files))
q = set([HEAD])
try:
while q:
- h=q.pop()
+ h = q.pop()
clean_checkout(h)
- hits = get_hits(defname, files = files)
+ hits = get_hits(defname, files=files)
for x in hits:
- prevc = get_commit_vitals(x.commit+"^")[0]
+ prevc = get_commit_vitals(x.commit + "^")[0]
if prevc not in done_commits:
q.add(prevc)
allhits.update(hits)
@@ -141,43 +154,46 @@ def search(defname,head_commit="HEAD"):
clean_checkout(HEAD)
return allhits
+
def pprint_hits(hits):
- SUBJ_LEN=50
+ SUBJ_LEN = 50
PATH_LEN = 20
- hits=list(hits)
+ hits = list(hits)
max_p = 0
for hit in hits:
- p=hit.path.split(os.path.realpath(os.curdir)+os.path.sep)[-1]
- max_p=max(max_p,len(p))
+ p = hit.path.split(os.path.realpath(os.curdir) + os.path.sep)[-1]
+ max_p = max(max_p, len(p))
if max_p < PATH_LEN:
SUBJ_LEN += PATH_LEN - max_p
PATH_LEN = max_p
def sorter(i):
- h,s,d=get_commit_vitals(hits[i].commit)
- return hits[i].path,d
+ h, s, d = get_commit_vitals(hits[i].commit)
+ return hits[i].path, d
- print("\nThese commits touched the %s method in these files on these dates:\n" \
- % args.funcname)
- for i in sorted(lrange(len(hits)),key=sorter):
+ print(('\nThese commits touched the %s method in these files '
+ 'on these dates:\n') % args.funcname)
+ for i in sorted(lrange(len(hits)), key=sorter):
hit = hits[i]
- h,s,d=get_commit_vitals(hit.commit)
- p=hit.path.split(os.path.realpath(os.curdir)+os.path.sep)[-1]
+ h, s, d = get_commit_vitals(hit.commit)
+ p = hit.path.split(os.path.realpath(os.curdir) + os.path.sep)[-1]
fmt = "{:%d} {:10} {:<%d} {:<%d}" % (HASH_LEN, SUBJ_LEN, PATH_LEN)
if len(s) > SUBJ_LEN:
- s = s[:SUBJ_LEN-5] + " ..."
- print(fmt.format(h[:HASH_LEN],d.isoformat()[:10],s,p[-20:]) )
+ s = s[:SUBJ_LEN - 5] + " ..."
+ print(fmt.format(h[:HASH_LEN], d.isoformat()[:10], s, p[-20:]))
print("\n")
+
def main():
if not args.saw_the_warning:
argparser.print_help()
print("""
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-WARNING: this script uses git clean -f, running it on a repo with untracked files.
+WARNING:
+this script uses git clean -f, running it on a repo with untracked files.
It's recommended that you make a fresh clone and run from its root directory.
You must specify the -y argument to ignore this warning.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -190,12 +206,11 @@ def main():
if isinstance(args.dir_masks, string_types):
args.dir_masks = args.dir_masks.split(',')
- logger.setLevel(getattr(logging,args.debug_level))
+ logger.setLevel(getattr(logging, args.debug_level))
- hits=search(args.funcname)
+ hits = search(args.funcname)
pprint_hits(hits)
- pass
if __name__ == "__main__":
import sys
diff --git a/scripts/find_undoc_args.py b/scripts/find_undoc_args.py
index 32b23a67b187f..a135c8e5171a1 100755
--- a/scripts/find_undoc_args.py
+++ b/scripts/find_undoc_args.py
@@ -1,126 +1,135 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
+"""
+Script that compares the signature arguments with the ones in the docsting
+and returns the differences in plain text or GitHub task list format.
+Usage::
+ $ ./find_undoc_args.py (see arguments below)
+"""
from __future__ import print_function
-
+import sys
from collections import namedtuple
-from itertools import islice
import types
import os
import re
import argparse
-#http://docs.python.org/2/library/argparse.html
-# arg name is positional is not prefixed with - or --
+import inspect
+
parser = argparse.ArgumentParser(description='Program description.')
parser.add_argument('-p', '--path', metavar='PATH', type=str, required=False,
- default=None,
- help='full path relative to which paths wills be reported',action='store')
-parser.add_argument('-m', '--module', metavar='MODULE', type=str,required=True,
- help='name of package to import and examine',action='store')
-parser.add_argument('-G', '--github_repo', metavar='REPO', type=str,required=False,
- help='github project where the code lives, e.g. "pandas-dev/pandas"',
- default=None,action='store')
-
+ default=None, action='store',
+ help='full path relative to which paths wills be reported')
+parser.add_argument('-m', '--module', metavar='MODULE', type=str,
+ required=True, action='store',
+ help='name of package to import and examine')
+parser.add_argument('-G', '--github_repo', metavar='REPO', type=str,
+ required=False, default=None, action='store',
+ help='github project where the code lives, '
+ 'e.g. "pandas-dev/pandas"')
args = parser.parse_args()
-Entry=namedtuple("Entry","func path lnum undoc_names missing_args nsig_names ndoc_names")
+Entry = namedtuple('Entry',
+ 'func path lnum undoc_names missing_args '
+ 'nsig_names ndoc_names')
-def entry_gen(root_ns,module_name):
- q=[root_ns]
- seen=set()
+def entry_gen(root_ns, module_name):
+ """Walk and yield all methods and functions in the module root_ns and
+ submodules."""
+ q = [root_ns]
+ seen = set()
while q:
ns = q.pop()
for x in dir(ns):
- cand = getattr(ns,x)
- if (isinstance(cand,types.ModuleType)
- and cand.__name__ not in seen
- and cand.__name__.startswith(module_name)):
- # print(cand.__name__)
+ cand = getattr(ns, x)
+ if (isinstance(cand, types.ModuleType) and
+ cand.__name__ not in seen and
+ cand.__name__.startswith(module_name)):
seen.add(cand.__name__)
- q.insert(0,cand)
- elif (isinstance(cand,(types.MethodType,types.FunctionType)) and
+ q.insert(0, cand)
+ elif (isinstance(cand, (types.MethodType, types.FunctionType)) and
cand not in seen and cand.__doc__):
seen.add(cand)
yield cand
+
def cmp_docstring_sig(f):
+ """Return an `Entry` object describing the differences between the
+ arguments in the signature and the documented ones."""
def build_loc(f):
- path=f.__code__.co_filename.split(args.path,1)[-1][1:]
- return dict(path=path,lnum=f.__code__.co_firstlineno)
+ path = f.__code__.co_filename.split(args.path, 1)[-1][1:]
+ return dict(path=path, lnum=f.__code__.co_firstlineno)
- import inspect
- sig_names=set(inspect.getargspec(f).args)
+ sig_names = set(inspect.getargspec(f).args)
+ # XXX numpydoc can be used to get the list of parameters
doc = f.__doc__.lower()
- doc = re.split("^\s*parameters\s*",doc,1,re.M)[-1]
- doc = re.split("^\s*returns*",doc,1,re.M)[0]
- doc_names={x.split(":")[0].strip() for x in doc.split("\n")
- if re.match("\s+[\w_]+\s*:",x)}
- sig_names.discard("self")
- doc_names.discard("kwds")
- doc_names.discard("kwargs")
- doc_names.discard("args")
- return Entry(func=f,path=build_loc(f)['path'],lnum=build_loc(f)['lnum'],
+ doc = re.split('^\s*parameters\s*', doc, 1, re.M)[-1]
+ doc = re.split('^\s*returns*', doc, 1, re.M)[0]
+ doc_names = {x.split(":")[0].strip() for x in doc.split('\n')
+ if re.match('\s+[\w_]+\s*:', x)}
+ sig_names.discard('self')
+ doc_names.discard('kwds')
+ doc_names.discard('kwargs')
+ doc_names.discard('args')
+ return Entry(func=f, path=build_loc(f)['path'], lnum=build_loc(f)['lnum'],
undoc_names=sig_names.difference(doc_names),
- missing_args=doc_names.difference(sig_names),nsig_names=len(sig_names),
- ndoc_names=len(doc_names))
+ missing_args=doc_names.difference(sig_names),
+ nsig_names=len(sig_names), ndoc_names=len(doc_names))
+
def format_id(i):
return i
-def format_item_as_github_task_list( i,item,repo):
- tmpl = "- [ ] {id}) [{file}:{lnum} ({func_name}())]({link}) - __Missing__[{nmissing}/{total_args}]: {undoc_names}"
+def format_item_as_github_task_list(i, item, repo):
+ tmpl = ('- [ ] {id_}) [{fname}:{lnum} ({func_name}())]({link}) - '
+ '__Missing__[{nmissing}/{total_args}]: {undoc_names}')
link_tmpl = "https://github.com/{repo}/blob/master/{file}#L{lnum}"
-
- link = link_tmpl.format(repo=repo,file=item.path ,lnum=item.lnum )
-
- s = tmpl.format(id=i,file=item.path ,
- lnum=item.lnum,
- func_name=item.func.__name__,
- link=link,
- nmissing=len(item.undoc_names),
- total_args=item.nsig_names,
- undoc_names=list(item.undoc_names))
-
+ link = link_tmpl.format(repo=repo, file=item.path, lnum=item.lnum)
+ s = tmpl.format(id_=i, fname=item.path, lnum=item.lnum,
+ func_name=item.func.__name__, link=link,
+ nmissing=len(item.undoc_names),
+ total_args=item.nsig_names,
+ undoc_names=list(item.undoc_names))
if item.missing_args:
- s+= " __Extra__(?): {missing_args}".format(missing_args=list(item.missing_args))
-
+ s += ' __Extra__(?): %s' % list(item.missing_args)
return s
-def format_item_as_plain(i,item):
- tmpl = "+{lnum} {path} {func_name}(): Missing[{nmissing}/{total_args}]={undoc_names}"
-
- s = tmpl.format(path=item.path ,
- lnum=item.lnum,
- func_name=item.func.__name__,
- nmissing=len(item.undoc_names),
- total_args=item.nsig_names,
- undoc_names=list(item.undoc_names))
+def format_item_as_plain(i, item):
+ tmpl = ('+{lnum} {path} {func_name}(): '
+ 'Missing[{nmissing}/{total_args}]={undoc_names}')
+ s = tmpl.format(path=item.path, lnum=item.lnum,
+ func_name=item.func.__name__,
+ nmissing=len(item.undoc_names),
+ total_args=item.nsig_names,
+ undoc_names=list(item.undoc_names))
if item.missing_args:
- s+= " Extra(?)={missing_args}".format(missing_args=list(item.missing_args))
-
+ s += ' Extra(?)=%s' % list(item.missing_args)
return s
+
def main():
module = __import__(args.module)
if not args.path:
- args.path=os.path.dirname(module.__file__)
- collect=[cmp_docstring_sig(e) for e in entry_gen(module,module.__name__)]
- # only include if there are missing arguments in the docstring (fewer false positives)
- # and there are at least some documented arguments
- collect = [e for e in collect if e.undoc_names and len(e.undoc_names) != e.nsig_names]
- collect.sort(key=lambda x:x.path)
+ args.path = os.path.dirname(module.__file__)
+ collect = [cmp_docstring_sig(e)
+ for e in entry_gen(module, module.__name__)]
+ # only include if there are missing arguments in the docstring
+ # (fewer false positives) and there are at least some documented arguments
+ collect = [e for e in collect
+ if e.undoc_names and len(e.undoc_names) != e.nsig_names]
+ collect.sort(key=lambda x: x.path)
if args.github_repo:
- for i,item in enumerate(collect,1):
- print( format_item_as_github_task_list(i,item,args.github_repo))
+ for i, item in enumerate(collect, 1):
+ print(format_item_as_github_task_list(i, item, args.github_repo))
else:
- for i,item in enumerate(collect,1):
- print( format_item_as_plain(i, item))
+ for i, item in enumerate(collect, 1):
+ print(format_item_as_plain(i, item))
+
-if __name__ == "__main__":
- import sys
+if __name__ == '__main__':
sys.exit(main())
diff --git a/scripts/merge-pr.py b/scripts/merge-pr.py
index 5337c37fe5320..31264cad52e4f 100755
--- a/scripts/merge-pr.py
+++ b/scripts/merge-pr.py
@@ -22,7 +22,6 @@
# usage: ./apache-pr-merge.py (see config env vars below)
#
# Lightly modified from version of this script in incubator-parquet-format
-
from __future__ import print_function
from subprocess import check_output
@@ -223,7 +222,7 @@ def update_pr(pr_num, user_login, base_ref):
try:
run_cmd(
'git push -f %s %s:%s' % (push_user_remote, pr_branch_name,
- base_ref))
+ base_ref))
except Exception as e:
fail("Exception while pushing: %s" % e)
clean_up()
@@ -275,6 +274,7 @@ def fix_version_from_branch(branch, versions):
branch_ver = branch.replace("branch-", "")
return filter(lambda x: x.name.startswith(branch_ver), versions)[-1]
+
pr_num = input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
| - [X] closes #18949
- [ ] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19344 | 2018-01-22T14:29:03Z | 2018-01-24T01:20:21Z | 2018-01-24T01:20:21Z | 2018-01-24T01:21:09Z |
MAINT: Remove pytest.warns in tests | diff --git a/ci/lint.sh b/ci/lint.sh
index a96e0961304e7..98b33c0803d90 100755
--- a/ci/lint.sh
+++ b/ci/lint.sh
@@ -89,6 +89,14 @@ if [ "$LINT" ]; then
if [ $? = "0" ]; then
RET=1
fi
+
+ # Check for pytest.warns
+ grep -r -E --include '*.py' 'pytest\.warns' pandas/tests/
+
+ if [ $? = "0" ]; then
+ RET=1
+ fi
+
echo "Check for invalid testing DONE"
# Check for imports from pandas.core.common instead
diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py
index b9c95c372ab9e..7be801629e387 100644
--- a/pandas/tests/series/test_timeseries.py
+++ b/pandas/tests/series/test_timeseries.py
@@ -937,7 +937,7 @@ def test_from_M8_structured(self):
assert isinstance(s[0], Timestamp)
assert s[0] == dates[0][0]
- with pytest.warns(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s = Series.from_array(arr['Date'], Index([0]))
assert s[0] == dates[0][0]
| Per discussion in #18258, we are prohibiting its use in tests, at least for the time being. | https://api.github.com/repos/pandas-dev/pandas/pulls/19341 | 2018-01-22T10:22:50Z | 2018-01-22T23:59:55Z | 2018-01-22T23:59:55Z | 2018-01-23T10:05:14Z |
ENH: Add dtype parameter to IntervalIndex constructors and deprecate from_intervals | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 88419df1880ec..ddd09327935ce 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -1617,7 +1617,6 @@ IntervalIndex Components
IntervalIndex.from_arrays
IntervalIndex.from_tuples
IntervalIndex.from_breaks
- IntervalIndex.from_intervals
IntervalIndex.contains
IntervalIndex.left
IntervalIndex.right
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 71492154419fb..4dde76dee46a5 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -207,9 +207,8 @@ Other Enhancements
:func:`pandas.api.extensions.register_index_accessor`, accessor for libraries downstream of pandas
to register custom accessors like ``.cat`` on pandas objects. See
:ref:`Registering Custom Accessors <developer.register-accessors>` for more (:issue:`14781`).
-
-
- ``IntervalIndex.astype`` now supports conversions between subtypes when passed an ``IntervalDtype`` (:issue:`19197`)
+- :class:`IntervalIndex` and its associated constructor methods (``from_arrays``, ``from_breaks``, ``from_tuples``) have gained a ``dtype`` parameter (:issue:`19262`)
.. _whatsnew_0230.api_breaking:
@@ -329,6 +328,7 @@ Deprecations
- ``Series.valid`` is deprecated. Use :meth:`Series.dropna` instead (:issue:`18800`).
- :func:`read_excel` has deprecated the ``skip_footer`` parameter. Use ``skipfooter`` instead (:issue:`18836`)
- The ``is_copy`` attribute is deprecated and will be removed in a future version (:issue:`18801`).
+- ``IntervalIndex.from_intervals`` is deprecated in favor of the :class:`IntervalIndex` constructor (:issue:`19263`)
.. _whatsnew_0230.prior_deprecations:
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index f67e6eae27001..74c6abeb0ad12 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -200,7 +200,9 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None,
# interval
if is_interval_dtype(data) or is_interval_dtype(dtype):
from .interval import IntervalIndex
- return IntervalIndex(data, dtype=dtype, name=name, copy=copy)
+ closed = kwargs.get('closed', None)
+ return IntervalIndex(data, dtype=dtype, name=name, copy=copy,
+ closed=closed)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
@@ -313,8 +315,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None,
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'interval':
from .interval import IntervalIndex
- return IntervalIndex.from_intervals(subarr, name=name,
- copy=copy)
+ return IntervalIndex(subarr, name=name, copy=copy)
elif inferred == 'boolean':
# don't support boolean explicitly ATM
pass
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 2d4655d84dca8..2c7be2b21f959 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -341,7 +341,7 @@ def __array__(self, dtype=None):
def astype(self, dtype, copy=True):
if is_interval_dtype(dtype):
from pandas import IntervalIndex
- return IntervalIndex.from_intervals(np.array(self))
+ return IntervalIndex(np.array(self))
elif is_categorical_dtype(dtype):
# GH 18630
dtype = self.dtype._update_dtype(dtype)
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 18fb71b490592..232770e582763 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -1,6 +1,7 @@
""" define the IntervalIndex """
import numpy as np
+import warnings
from pandas.core.dtypes.missing import notna, isna
from pandas.core.dtypes.generic import ABCDatetimeIndex, ABCPeriodIndex
@@ -151,6 +152,10 @@ class IntervalIndex(IntervalMixin, Index):
Name to be stored in the index.
copy : boolean, default False
Copy the meta-data
+ dtype : dtype or None, default None
+ If None, dtype will be inferred
+
+ ..versionadded:: 0.23.0
Attributes
----------
@@ -167,7 +172,6 @@ class IntervalIndex(IntervalMixin, Index):
from_arrays
from_tuples
from_breaks
- from_intervals
contains
Examples
@@ -181,8 +185,7 @@ class IntervalIndex(IntervalMixin, Index):
It may also be constructed using one of the constructor
methods: :meth:`IntervalIndex.from_arrays`,
- :meth:`IntervalIndex.from_breaks`, :meth:`IntervalIndex.from_intervals`
- and :meth:`IntervalIndex.from_tuples`.
+ :meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.
See further examples in the doc strings of ``interval_range`` and the
mentioned constructor methods.
@@ -211,8 +214,7 @@ class IntervalIndex(IntervalMixin, Index):
_mask = None
- def __new__(cls, data, closed=None,
- name=None, copy=False, dtype=None,
+ def __new__(cls, data, closed=None, name=None, copy=False, dtype=None,
fastpath=False, verify_integrity=True):
if fastpath:
@@ -245,19 +247,28 @@ def __new__(cls, data, closed=None,
closed = closed or infer_closed
- return cls._simple_new(left, right, closed, name,
- copy=copy, verify_integrity=verify_integrity)
+ return cls._simple_new(left, right, closed, name, copy=copy,
+ dtype=dtype, verify_integrity=verify_integrity)
@classmethod
- def _simple_new(cls, left, right, closed=None, name=None,
- copy=False, verify_integrity=True):
+ def _simple_new(cls, left, right, closed=None, name=None, copy=False,
+ dtype=None, verify_integrity=True):
result = IntervalMixin.__new__(cls)
- if closed is None:
- closed = 'right'
+ closed = closed or 'right'
left = _ensure_index(left, copy=copy)
right = _ensure_index(right, copy=copy)
+ if dtype is not None:
+ # GH 19262: dtype must be an IntervalDtype to override inferred
+ dtype = pandas_dtype(dtype)
+ if not is_interval_dtype(dtype):
+ msg = 'dtype must be an IntervalDtype, got {dtype}'
+ raise TypeError(msg.format(dtype=dtype))
+ elif dtype.subtype is not None:
+ left = left.astype(dtype.subtype)
+ right = right.astype(dtype.subtype)
+
# coerce dtypes to match if needed
if is_float_dtype(left) and is_integer_dtype(right):
right = right.astype(left.dtype)
@@ -304,7 +315,7 @@ def _shallow_copy(self, left=None, right=None, **kwargs):
# only single value passed, could be an IntervalIndex
# or array of Intervals
if not isinstance(left, IntervalIndex):
- left = type(self).from_intervals(left)
+ left = self._constructor(left)
left, right = left.left, left.right
else:
@@ -322,7 +333,7 @@ def _validate(self):
Verify that the IntervalIndex is valid.
"""
if self.closed not in _VALID_CLOSED:
- raise ValueError("invalid options for 'closed': {closed}"
+ raise ValueError("invalid option for 'closed': {closed}"
.format(closed=self.closed))
if len(self.left) != len(self.right):
raise ValueError('left and right must have the same length')
@@ -356,7 +367,7 @@ def _engine(self):
@property
def _constructor(self):
- return type(self).from_intervals
+ return type(self)
def __contains__(self, key):
"""
@@ -402,7 +413,8 @@ def contains(self, key):
return False
@classmethod
- def from_breaks(cls, breaks, closed='right', name=None, copy=False):
+ def from_breaks(cls, breaks, closed='right', name=None, copy=False,
+ dtype=None):
"""
Construct an IntervalIndex from an array of splits
@@ -417,6 +429,10 @@ def from_breaks(cls, breaks, closed='right', name=None, copy=False):
Name to be stored in the index.
copy : boolean, default False
copy the data
+ dtype : dtype or None, default None
+ If None, dtype will be inferred
+
+ ..versionadded:: 0.23.0
Examples
--------
@@ -430,18 +446,17 @@ def from_breaks(cls, breaks, closed='right', name=None, copy=False):
interval_range : Function to create a fixed frequency IntervalIndex
IntervalIndex.from_arrays : Construct an IntervalIndex from a left and
right array
- IntervalIndex.from_intervals : Construct an IntervalIndex from an array
- of Interval objects
IntervalIndex.from_tuples : Construct an IntervalIndex from a
list/array of tuples
"""
breaks = maybe_convert_platform_interval(breaks)
return cls.from_arrays(breaks[:-1], breaks[1:], closed,
- name=name, copy=copy)
+ name=name, copy=copy, dtype=dtype)
@classmethod
- def from_arrays(cls, left, right, closed='right', name=None, copy=False):
+ def from_arrays(cls, left, right, closed='right', name=None, copy=False,
+ dtype=None):
"""
Construct an IntervalIndex from a a left and right array
@@ -458,6 +473,10 @@ def from_arrays(cls, left, right, closed='right', name=None, copy=False):
Name to be stored in the index.
copy : boolean, default False
copy the data
+ dtype : dtype or None, default None
+ If None, dtype will be inferred
+
+ ..versionadded:: 0.23.0
Examples
--------
@@ -471,22 +490,23 @@ def from_arrays(cls, left, right, closed='right', name=None, copy=False):
interval_range : Function to create a fixed frequency IntervalIndex
IntervalIndex.from_breaks : Construct an IntervalIndex from an array of
splits
- IntervalIndex.from_intervals : Construct an IntervalIndex from an array
- of Interval objects
IntervalIndex.from_tuples : Construct an IntervalIndex from a
list/array of tuples
"""
left = maybe_convert_platform_interval(left)
right = maybe_convert_platform_interval(right)
- return cls._simple_new(left, right, closed, name=name,
- copy=copy, verify_integrity=True)
+ return cls._simple_new(left, right, closed, name=name, copy=copy,
+ dtype=dtype, verify_integrity=True)
@classmethod
- def from_intervals(cls, data, name=None, copy=False):
+ def from_intervals(cls, data, closed=None, name=None, copy=False,
+ dtype=None):
"""
Construct an IntervalIndex from a 1d array of Interval objects
+ .. deprecated:: 0.23.0
+
Parameters
----------
data : array-like (1-dimensional)
@@ -496,6 +516,10 @@ def from_intervals(cls, data, name=None, copy=False):
Name to be stored in the index.
copy : boolean, default False
by-default copy the data, this is compat only and ignored
+ dtype : dtype or None, default None
+ If None, dtype will be inferred
+
+ ..versionadded:: 0.23.0
Examples
--------
@@ -521,16 +545,14 @@ def from_intervals(cls, data, name=None, copy=False):
IntervalIndex.from_tuples : Construct an IntervalIndex from a
list/array of tuples
"""
- if isinstance(data, IntervalIndex):
- left, right, closed = data.left, data.right, data.closed
- name = name or data.name
- else:
- data = maybe_convert_platform_interval(data)
- left, right, closed = intervals_to_interval_bounds(data)
- return cls.from_arrays(left, right, closed, name=name, copy=False)
+ msg = ('IntervalIndex.from_intervals is deprecated and will be '
+ 'removed in a future version; use IntervalIndex(...) instead')
+ warnings.warn(msg, FutureWarning, stacklevel=2)
+ return cls(data, closed=closed, name=name, copy=copy, dtype=dtype)
@classmethod
- def from_tuples(cls, data, closed='right', name=None, copy=False):
+ def from_tuples(cls, data, closed='right', name=None, copy=False,
+ dtype=None):
"""
Construct an IntervalIndex from a list/array of tuples
@@ -545,10 +567,14 @@ def from_tuples(cls, data, closed='right', name=None, copy=False):
Name to be stored in the index.
copy : boolean, default False
by-default copy the data, this is compat only and ignored
+ dtype : dtype or None, default None
+ If None, dtype will be inferred
+
+ ..versionadded:: 0.23.0
Examples
--------
- >>> pd.IntervalIndex.from_tuples([(0, 1), (1,2)])
+ >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])
IntervalIndex([(0, 1], (1, 2]],
closed='right', dtype='interval[int64]')
@@ -559,8 +585,6 @@ def from_tuples(cls, data, closed='right', name=None, copy=False):
right array
IntervalIndex.from_breaks : Construct an IntervalIndex from an array of
splits
- IntervalIndex.from_intervals : Construct an IntervalIndex from an array
- of Interval objects
"""
if len(data):
left, right = [], []
@@ -571,15 +595,22 @@ def from_tuples(cls, data, closed='right', name=None, copy=False):
if isna(d):
lhs = rhs = np.nan
else:
- lhs, rhs = d
+ try:
+ # need list of length 2 tuples, e.g. [(0, 1), (1, 2), ...]
+ lhs, rhs = d
+ except ValueError:
+ msg = ('IntervalIndex.from_tuples requires tuples of '
+ 'length 2, got {tpl}').format(tpl=d)
+ raise ValueError(msg)
+ except TypeError:
+ msg = ('IntervalIndex.from_tuples received an invalid '
+ 'item, {tpl}').format(tpl=d)
+ raise TypeError(msg)
left.append(lhs)
right.append(rhs)
- # TODO
- # if we have nulls and we previous had *only*
- # integer data, then we have changed the dtype
-
- return cls.from_arrays(left, right, closed, name=name, copy=False)
+ return cls.from_arrays(left, right, closed, name=name, copy=False,
+ dtype=dtype)
def to_tuples(self, na_tuple=True):
"""
@@ -921,7 +952,7 @@ def get_loc(self, key, method=None):
Examples
---------
>>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)
- >>> index = pd.IntervalIndex.from_intervals([i1, i2])
+ >>> index = pd.IntervalIndex([i1, i2])
>>> index.get_loc(1)
0
@@ -937,7 +968,7 @@ def get_loc(self, key, method=None):
relevant intervals.
>>> i3 = pd.Interval(0, 2)
- >>> overlapping_index = pd.IntervalIndex.from_intervals([i2, i3])
+ >>> overlapping_index = pd.IntervalIndex([i2, i3])
>>> overlapping_index.get_loc(1.5)
array([0, 1], dtype=int64)
"""
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index 2adf17a227a59..777f08bd9db2b 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -348,8 +348,7 @@ def _format_labels(bins, precision, right=True,
# account that we are all right closed
v = adjust(labels[0].left)
- i = IntervalIndex.from_intervals(
- [Interval(v, labels[0].right, closed='right')])
+ i = IntervalIndex([Interval(v, labels[0].right, closed='right')])
labels = i.append(labels[1:])
return labels
diff --git a/pandas/tests/categorical/test_constructors.py b/pandas/tests/categorical/test_constructors.py
index abea7e9a0e0b4..b29d75bed5c6f 100644
--- a/pandas/tests/categorical/test_constructors.py
+++ b/pandas/tests/categorical/test_constructors.py
@@ -76,9 +76,7 @@ def test_constructor_unsortable(self):
def test_constructor_interval(self):
result = Categorical([Interval(1, 2), Interval(2, 3), Interval(3, 6)],
ordered=True)
- ii = IntervalIndex.from_intervals([Interval(1, 2),
- Interval(2, 3),
- Interval(3, 6)])
+ ii = IntervalIndex([Interval(1, 2), Interval(2, 3), Interval(3, 6)])
exp = Categorical(ii, ordered=True)
tm.assert_categorical_equal(result, exp)
tm.assert_index_equal(result.categories, ii)
diff --git a/pandas/tests/indexes/interval/test_construction.py b/pandas/tests/indexes/interval/test_construction.py
new file mode 100644
index 0000000000000..5fdf92dcb2044
--- /dev/null
+++ b/pandas/tests/indexes/interval/test_construction.py
@@ -0,0 +1,342 @@
+from __future__ import division
+
+import pytest
+import numpy as np
+from functools import partial
+
+from pandas import (
+ Interval, IntervalIndex, Index, Int64Index, Float64Index, Categorical,
+ date_range, timedelta_range, period_range, notna)
+from pandas.compat import lzip
+from pandas.core.dtypes.dtypes import IntervalDtype
+import pandas.core.common as com
+import pandas.util.testing as tm
+
+
+@pytest.fixture(params=['left', 'right', 'both', 'neither'])
+def closed(request):
+ return request.param
+
+
+@pytest.fixture(params=[None, 'foo'])
+def name(request):
+ return request.param
+
+
+class Base(object):
+ """
+ Common tests for all variations of IntervalIndex construction. Input data
+ to be supplied in breaks format, then converted by the subclass method
+ get_kwargs_from_breaks to the expected format.
+ """
+
+ @pytest.mark.parametrize('breaks', [
+ [3, 14, 15, 92, 653],
+ np.arange(10, dtype='int64'),
+ Int64Index(range(-10, 11)),
+ Float64Index(np.arange(20, 30, 0.5)),
+ date_range('20180101', periods=10),
+ date_range('20180101', periods=10, tz='US/Eastern'),
+ timedelta_range('1 day', periods=10)])
+ def test_constructor(self, constructor, breaks, closed, name):
+ result_kwargs = self.get_kwargs_from_breaks(breaks, closed)
+ result = constructor(closed=closed, name=name, **result_kwargs)
+
+ assert result.closed == closed
+ assert result.name == name
+ assert result.dtype.subtype == getattr(breaks, 'dtype', 'int64')
+ tm.assert_index_equal(result.left, Index(breaks[:-1]))
+ tm.assert_index_equal(result.right, Index(breaks[1:]))
+
+ @pytest.mark.parametrize('breaks, subtype', [
+ (Int64Index([0, 1, 2, 3, 4]), 'float64'),
+ (Int64Index([0, 1, 2, 3, 4]), 'datetime64[ns]'),
+ (Int64Index([0, 1, 2, 3, 4]), 'timedelta64[ns]'),
+ (Float64Index([0, 1, 2, 3, 4]), 'int64'),
+ (date_range('2017-01-01', periods=5), 'int64'),
+ (timedelta_range('1 day', periods=5), 'int64')])
+ def test_constructor_dtype(self, constructor, breaks, subtype):
+ # GH 19262: conversion via dtype parameter
+ expected_kwargs = self.get_kwargs_from_breaks(breaks.astype(subtype))
+ expected = constructor(**expected_kwargs)
+
+ result_kwargs = self.get_kwargs_from_breaks(breaks)
+ iv_dtype = IntervalDtype(subtype)
+ for dtype in (iv_dtype, str(iv_dtype)):
+ result = constructor(dtype=dtype, **result_kwargs)
+ tm.assert_index_equal(result, expected)
+
+ @pytest.mark.parametrize('breaks', [
+ [np.nan] * 2, [np.nan] * 4, [np.nan] * 50])
+ def test_constructor_nan(self, constructor, breaks, closed):
+ # GH 18421
+ result_kwargs = self.get_kwargs_from_breaks(breaks)
+ result = constructor(closed=closed, **result_kwargs)
+
+ expected_subtype = np.float64
+ expected_values = np.array(breaks[:-1], dtype=object)
+
+ assert result.closed == closed
+ assert result.dtype.subtype == expected_subtype
+ tm.assert_numpy_array_equal(result.values, expected_values)
+
+ @pytest.mark.parametrize('breaks', [
+ [],
+ np.array([], dtype='int64'),
+ np.array([], dtype='float64'),
+ np.array([], dtype='datetime64[ns]'),
+ np.array([], dtype='timedelta64[ns]')])
+ def test_constructor_empty(self, constructor, breaks, closed):
+ # GH 18421
+ result_kwargs = self.get_kwargs_from_breaks(breaks)
+ result = constructor(closed=closed, **result_kwargs)
+
+ expected_values = np.array([], dtype=object)
+ expected_subtype = getattr(breaks, 'dtype', np.int64)
+
+ assert result.empty
+ assert result.closed == closed
+ assert result.dtype.subtype == expected_subtype
+ tm.assert_numpy_array_equal(result.values, expected_values)
+
+ @pytest.mark.parametrize('breaks', [
+ tuple('0123456789'),
+ list('abcdefghij'),
+ np.array(list('abcdefghij'), dtype=object),
+ np.array(list('abcdefghij'), dtype='<U1')])
+ def test_constructor_string(self, constructor, breaks):
+ # GH 19016
+ msg = ('category, object, and string subtypes are not supported '
+ 'for IntervalIndex')
+ with tm.assert_raises_regex(TypeError, msg):
+ constructor(**self.get_kwargs_from_breaks(breaks))
+
+ def test_generic_errors(self, constructor):
+ # filler input data to be used when supplying invalid kwargs
+ filler = self.get_kwargs_from_breaks(range(10))
+
+ # invalid closed
+ msg = "invalid option for 'closed': invalid"
+ with tm.assert_raises_regex(ValueError, msg):
+ constructor(closed='invalid', **filler)
+
+ # unsupported dtype
+ msg = 'dtype must be an IntervalDtype, got int64'
+ with tm.assert_raises_regex(TypeError, msg):
+ constructor(dtype='int64', **filler)
+
+ # invalid dtype
+ msg = 'data type "invalid" not understood'
+ with tm.assert_raises_regex(TypeError, msg):
+ constructor(dtype='invalid', **filler)
+
+ # no point in nesting periods in an IntervalIndex
+ periods = period_range('2000-01-01', periods=10)
+ periods_kwargs = self.get_kwargs_from_breaks(periods)
+ msg = 'Period dtypes are not supported, use a PeriodIndex instead'
+ with tm.assert_raises_regex(ValueError, msg):
+ constructor(**periods_kwargs)
+
+ # decreasing values
+ decreasing_kwargs = self.get_kwargs_from_breaks(range(10, -1, -1))
+ msg = 'left side of interval must be <= right side'
+ with tm.assert_raises_regex(ValueError, msg):
+ constructor(**decreasing_kwargs)
+
+
+class TestFromArrays(Base):
+ """Tests specific to IntervalIndex.from_arrays"""
+
+ @pytest.fixture
+ def constructor(self):
+ return IntervalIndex.from_arrays
+
+ def get_kwargs_from_breaks(self, breaks, closed='right'):
+ """
+ converts intervals in breaks format to a dictionary of kwargs to
+ specific to the format expected by IntervalIndex.from_arrays
+ """
+ return {'left': breaks[:-1], 'right': breaks[1:]}
+
+ def test_constructor_errors(self):
+ # GH 19016: categorical data
+ data = Categorical(list('01234abcde'), ordered=True)
+ msg = ('category, object, and string subtypes are not supported '
+ 'for IntervalIndex')
+ with tm.assert_raises_regex(TypeError, msg):
+ IntervalIndex.from_arrays(data[:-1], data[1:])
+
+ # unequal length
+ left = [0, 1, 2]
+ right = [2, 3]
+ msg = 'left and right must have the same length'
+ with tm.assert_raises_regex(ValueError, msg):
+ IntervalIndex.from_arrays(left, right)
+
+ @pytest.mark.parametrize('left_subtype, right_subtype', [
+ (np.int64, np.float64), (np.float64, np.int64)])
+ def test_mixed_float_int(self, left_subtype, right_subtype):
+ """mixed int/float left/right results in float for both sides"""
+ left = np.arange(9, dtype=left_subtype)
+ right = np.arange(1, 10, dtype=right_subtype)
+ result = IntervalIndex.from_arrays(left, right)
+
+ expected_left = Float64Index(left)
+ expected_right = Float64Index(right)
+ expected_subtype = np.float64
+
+ tm.assert_index_equal(result.left, expected_left)
+ tm.assert_index_equal(result.right, expected_right)
+ assert result.dtype.subtype == expected_subtype
+
+
+class TestFromBreaks(Base):
+ """Tests specific to IntervalIndex.from_breaks"""
+
+ @pytest.fixture
+ def constructor(self):
+ return IntervalIndex.from_breaks
+
+ def get_kwargs_from_breaks(self, breaks, closed='right'):
+ """
+ converts intervals in breaks format to a dictionary of kwargs to
+ specific to the format expected by IntervalIndex.from_breaks
+ """
+ return {'breaks': breaks}
+
+ def test_constructor_errors(self):
+ # GH 19016: categorical data
+ data = Categorical(list('01234abcde'), ordered=True)
+ msg = ('category, object, and string subtypes are not supported '
+ 'for IntervalIndex')
+ with tm.assert_raises_regex(TypeError, msg):
+ IntervalIndex.from_breaks(data)
+
+ def test_length_one(self):
+ """breaks of length one produce an empty IntervalIndex"""
+ breaks = [0]
+ result = IntervalIndex.from_breaks(breaks)
+ expected = IntervalIndex.from_breaks([])
+ tm.assert_index_equal(result, expected)
+
+
+class TestFromTuples(Base):
+ """Tests specific to IntervalIndex.from_tuples"""
+
+ @pytest.fixture
+ def constructor(self):
+ return IntervalIndex.from_tuples
+
+ def get_kwargs_from_breaks(self, breaks, closed='right'):
+ """
+ converts intervals in breaks format to a dictionary of kwargs to
+ specific to the format expected by IntervalIndex.from_tuples
+ """
+ if len(breaks) == 0:
+ return {'data': breaks}
+
+ tuples = lzip(breaks[:-1], breaks[1:])
+ if isinstance(breaks, (list, tuple)):
+ return {'data': tuples}
+ return {'data': com._asarray_tuplesafe(tuples)}
+
+ def test_constructor_errors(self):
+ # non-tuple
+ tuples = [(0, 1), 2, (3, 4)]
+ msg = 'IntervalIndex.from_tuples received an invalid item, 2'
+ with tm.assert_raises_regex(TypeError, msg.format(t=tuples)):
+ IntervalIndex.from_tuples(tuples)
+
+ # too few/many items
+ tuples = [(0, 1), (2,), (3, 4)]
+ msg = 'IntervalIndex.from_tuples requires tuples of length 2, got {t}'
+ with tm.assert_raises_regex(ValueError, msg.format(t=tuples)):
+ IntervalIndex.from_tuples(tuples)
+
+ tuples = [(0, 1), (2, 3, 4), (5, 6)]
+ with tm.assert_raises_regex(ValueError, msg.format(t=tuples)):
+ IntervalIndex.from_tuples(tuples)
+
+ def test_na_tuples(self):
+ # tuple (NA, NA) evaluates the same as NA as an elemenent
+ na_tuple = [(0, 1), (np.nan, np.nan), (2, 3)]
+ idx_na_tuple = IntervalIndex.from_tuples(na_tuple)
+ idx_na_element = IntervalIndex.from_tuples([(0, 1), np.nan, (2, 3)])
+ tm.assert_index_equal(idx_na_tuple, idx_na_element)
+
+
+class TestClassConstructors(Base):
+ """Tests specific to the IntervalIndex/Index constructors"""
+
+ @pytest.fixture(params=[IntervalIndex, partial(Index, dtype='interval')],
+ ids=['IntervalIndex', 'Index'])
+ def constructor(self, request):
+ return request.param
+
+ def get_kwargs_from_breaks(self, breaks, closed='right'):
+ """
+ converts intervals in breaks format to a dictionary of kwargs to
+ specific to the format expected by the IntervalIndex/Index constructors
+ """
+ if len(breaks) == 0:
+ return {'data': breaks}
+
+ ivs = [Interval(l, r, closed) if notna(l) else l
+ for l, r in zip(breaks[:-1], breaks[1:])]
+
+ if isinstance(breaks, list):
+ return {'data': ivs}
+ return {'data': np.array(ivs, dtype=object)}
+
+ def test_generic_errors(self, constructor):
+ """
+ override the base class implementation since errors are handled
+ differently; checks unnecessary since caught at the Interval level
+ """
+ pass
+
+ def test_constructor_errors(self, constructor):
+ # mismatched closed inferred from intervals vs constructor.
+ ivs = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
+ msg = 'conflicting values for closed'
+ with tm.assert_raises_regex(ValueError, msg):
+ constructor(ivs, closed='neither')
+
+ # mismatched closed within intervals
+ ivs = [Interval(0, 1, closed='right'), Interval(2, 3, closed='left')]
+ msg = 'intervals must all be closed on the same side'
+ with tm.assert_raises_regex(ValueError, msg):
+ constructor(ivs)
+
+ # scalar
+ msg = (r'IntervalIndex\(...\) must be called with a collection of '
+ 'some kind, 5 was passed')
+ with tm.assert_raises_regex(TypeError, msg):
+ constructor(5)
+
+ # not an interval
+ msg = ("type <(class|type) 'numpy.int64'> with value 0 "
+ "is not an interval")
+ with tm.assert_raises_regex(TypeError, msg):
+ constructor([0, 1])
+
+
+class TestFromIntervals(TestClassConstructors):
+ """
+ Tests for IntervalIndex.from_intervals, which is deprecated in favor of the
+ IntervalIndex constructor. Same tests as the IntervalIndex constructor,
+ plus deprecation test. Should only need to delete this class when removed.
+ """
+
+ @pytest.fixture
+ def constructor(self):
+ def from_intervals_ignore_warnings(*args, **kwargs):
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ return IntervalIndex.from_intervals(*args, **kwargs)
+ return from_intervals_ignore_warnings
+
+ def test_deprecated(self):
+ ivs = [Interval(0, 1), Interval(1, 2)]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ IntervalIndex.from_intervals(ivs)
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index 0e509c241fe51..71a6f78125004 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -4,7 +4,7 @@
import numpy as np
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
- Timedelta, date_range, timedelta_range, Categorical)
+ Timedelta, date_range, timedelta_range)
from pandas.compat import lzip
import pandas.core.common as com
from pandas.tests.indexes.common import Base
@@ -40,249 +40,6 @@ def create_index_with_nan(self, closed='right'):
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
- @pytest.mark.parametrize('data', [
- Index([0, 1, 2, 3, 4]),
- date_range('2017-01-01', periods=5),
- date_range('2017-01-01', periods=5, tz='US/Eastern'),
- timedelta_range('1 day', periods=5)])
- def test_constructors(self, data, closed, name):
- left, right = data[:-1], data[1:]
- ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
- expected = IntervalIndex._simple_new(
- left=left, right=right, closed=closed, name=name)
-
- # validate expected
- assert expected.closed == closed
- assert expected.name == name
- assert expected.dtype.subtype == data.dtype
- tm.assert_index_equal(expected.left, data[:-1])
- tm.assert_index_equal(expected.right, data[1:])
-
- # validated constructors
- result = IntervalIndex(ivs, name=name)
- tm.assert_index_equal(result, expected)
-
- result = IntervalIndex.from_intervals(ivs, name=name)
- tm.assert_index_equal(result, expected)
-
- result = IntervalIndex.from_breaks(data, closed=closed, name=name)
- tm.assert_index_equal(result, expected)
-
- result = IntervalIndex.from_arrays(
- left, right, closed=closed, name=name)
- tm.assert_index_equal(result, expected)
-
- result = IntervalIndex.from_tuples(
- lzip(left, right), closed=closed, name=name)
- tm.assert_index_equal(result, expected)
-
- result = Index(ivs, name=name)
- assert isinstance(result, IntervalIndex)
- tm.assert_index_equal(result, expected)
-
- # idempotent
- tm.assert_index_equal(Index(expected), expected)
- tm.assert_index_equal(IntervalIndex(expected), expected)
-
- result = IntervalIndex.from_intervals(expected)
- tm.assert_index_equal(result, expected)
-
- result = IntervalIndex.from_intervals(
- expected.values, name=expected.name)
- tm.assert_index_equal(result, expected)
-
- left, right = expected.left, expected.right
- result = IntervalIndex.from_arrays(
- left, right, closed=expected.closed, name=expected.name)
- tm.assert_index_equal(result, expected)
-
- result = IntervalIndex.from_tuples(
- expected.to_tuples(), closed=expected.closed, name=expected.name)
- tm.assert_index_equal(result, expected)
-
- breaks = expected.left.tolist() + [expected.right[-1]]
- result = IntervalIndex.from_breaks(
- breaks, closed=expected.closed, name=expected.name)
- tm.assert_index_equal(result, expected)
-
- @pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
- def test_constructors_nan(self, closed, data):
- # GH 18421
- expected_values = np.array(data, dtype=object)
- expected_idx = IntervalIndex(data, closed=closed)
-
- # validate the expected index
- assert expected_idx.closed == closed
- tm.assert_numpy_array_equal(expected_idx.values, expected_values)
-
- result = IntervalIndex.from_tuples(data, closed=closed)
- tm.assert_index_equal(result, expected_idx)
- tm.assert_numpy_array_equal(result.values, expected_values)
-
- result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
- tm.assert_index_equal(result, expected_idx)
- tm.assert_numpy_array_equal(result.values, expected_values)
-
- result = IntervalIndex.from_arrays(data, data, closed=closed)
- tm.assert_index_equal(result, expected_idx)
- tm.assert_numpy_array_equal(result.values, expected_values)
-
- if closed == 'right':
- # Can't specify closed for IntervalIndex.from_intervals
- result = IntervalIndex.from_intervals(data)
- tm.assert_index_equal(result, expected_idx)
- tm.assert_numpy_array_equal(result.values, expected_values)
-
- @pytest.mark.parametrize('data', [
- [],
- np.array([], dtype='int64'),
- np.array([], dtype='float64'),
- np.array([], dtype='datetime64[ns]')])
- def test_constructors_empty(self, data, closed):
- # GH 18421
- expected_dtype = getattr(data, 'dtype', np.int64)
- expected_values = np.array([], dtype=object)
- expected_index = IntervalIndex(data, closed=closed)
-
- # validate the expected index
- assert expected_index.empty
- assert expected_index.closed == closed
- assert expected_index.dtype.subtype == expected_dtype
- tm.assert_numpy_array_equal(expected_index.values, expected_values)
-
- result = IntervalIndex.from_tuples(data, closed=closed)
- tm.assert_index_equal(result, expected_index)
- tm.assert_numpy_array_equal(result.values, expected_values)
-
- result = IntervalIndex.from_breaks(data, closed=closed)
- tm.assert_index_equal(result, expected_index)
- tm.assert_numpy_array_equal(result.values, expected_values)
-
- result = IntervalIndex.from_arrays(data, data, closed=closed)
- tm.assert_index_equal(result, expected_index)
- tm.assert_numpy_array_equal(result.values, expected_values)
-
- if closed == 'right':
- # Can't specify closed for IntervalIndex.from_intervals
- result = IntervalIndex.from_intervals(data)
- tm.assert_index_equal(result, expected_index)
- tm.assert_numpy_array_equal(result.values, expected_values)
-
- def test_constructors_errors(self):
-
- # scalar
- msg = (r'IntervalIndex\(...\) must be called with a collection of '
- 'some kind, 5 was passed')
- with tm.assert_raises_regex(TypeError, msg):
- IntervalIndex(5)
-
- # not an interval
- msg = ("type <(class|type) 'numpy.int64'> with value 0 "
- "is not an interval")
- with tm.assert_raises_regex(TypeError, msg):
- IntervalIndex([0, 1])
-
- with tm.assert_raises_regex(TypeError, msg):
- IntervalIndex.from_intervals([0, 1])
-
- # invalid closed
- msg = "invalid options for 'closed': invalid"
- with tm.assert_raises_regex(ValueError, msg):
- IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
-
- # mismatched closed within intervals
- msg = 'intervals must all be closed on the same side'
- with tm.assert_raises_regex(ValueError, msg):
- IntervalIndex.from_intervals([Interval(0, 1),
- Interval(1, 2, closed='left')])
-
- with tm.assert_raises_regex(ValueError, msg):
- IntervalIndex([Interval(0, 1), Interval(2, 3, closed='left')])
-
- with tm.assert_raises_regex(ValueError, msg):
- Index([Interval(0, 1), Interval(2, 3, closed='left')])
-
- # mismatched closed inferred from intervals vs constructor.
- msg = 'conflicting values for closed'
- with tm.assert_raises_regex(ValueError, msg):
- iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
- IntervalIndex(iv, closed='neither')
-
- # no point in nesting periods in an IntervalIndex
- msg = 'Period dtypes are not supported, use a PeriodIndex instead'
- with tm.assert_raises_regex(ValueError, msg):
- IntervalIndex.from_breaks(
- pd.period_range('2000-01-01', periods=3))
-
- # decreasing breaks/arrays
- msg = 'left side of interval must be <= right side'
- with tm.assert_raises_regex(ValueError, msg):
- IntervalIndex.from_breaks(range(10, -1, -1))
-
- with tm.assert_raises_regex(ValueError, msg):
- IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
-
- # GH 19016: categorical data
- data = Categorical(list('01234abcde'), ordered=True)
- msg = ('category, object, and string subtypes are not supported '
- 'for IntervalIndex')
-
- with tm.assert_raises_regex(TypeError, msg):
- IntervalIndex.from_breaks(data)
-
- with tm.assert_raises_regex(TypeError, msg):
- IntervalIndex.from_arrays(data[:-1], data[1:])
-
- @pytest.mark.parametrize('data', [
- tuple('0123456789'),
- list('abcdefghij'),
- np.array(list('abcdefghij'), dtype=object),
- np.array(list('abcdefghij'), dtype='<U1')])
- def test_constructors_errors_string(self, data):
- # GH 19016
- left, right = data[:-1], data[1:]
- tuples = lzip(left, right)
- ivs = [Interval(l, r) for l, r in tuples] or data
- msg = ('category, object, and string subtypes are not supported '
- 'for IntervalIndex')
-
- with tm.assert_raises_regex(TypeError, msg):
- IntervalIndex(ivs)
-
- with tm.assert_raises_regex(TypeError, msg):
- Index(ivs)
-
- with tm.assert_raises_regex(TypeError, msg):
- IntervalIndex.from_intervals(ivs)
-
- with tm.assert_raises_regex(TypeError, msg):
- IntervalIndex.from_breaks(data)
-
- with tm.assert_raises_regex(TypeError, msg):
- IntervalIndex.from_arrays(left, right)
-
- with tm.assert_raises_regex(TypeError, msg):
- IntervalIndex.from_tuples(tuples)
-
- @pytest.mark.parametrize('tz_left, tz_right', [
- (None, 'UTC'), ('UTC', None), ('UTC', 'US/Eastern')])
- def test_constructors_errors_tz(self, tz_left, tz_right):
- # GH 18537
- left = date_range('2017-01-01', periods=4, tz=tz_left)
- right = date_range('2017-01-02', periods=4, tz=tz_right)
-
- # don't need to check IntervalIndex(...) or from_intervals, since
- # mixed tz are disallowed at the Interval level
- with pytest.raises(ValueError):
- IntervalIndex.from_arrays(left, right)
-
- with pytest.raises(ValueError):
- IntervalIndex.from_tuples(lzip(left, right))
-
- with pytest.raises(ValueError):
- breaks = left.tolist() + [right[-1]]
- IntervalIndex.from_breaks(breaks)
-
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
@@ -384,7 +141,7 @@ def test_ensure_copied_data(self, closed):
check_same='same')
# by-definition make a copy
- result = IntervalIndex.from_intervals(index.values, copy=False)
+ result = IntervalIndex(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py
index 268376b1f0d32..c2e40c79f8914 100644
--- a/pandas/tests/indexes/test_category.py
+++ b/pandas/tests/indexes/test_category.py
@@ -422,7 +422,7 @@ def test_astype(self):
expected = ii.take([0, 1, -1])
tm.assert_index_equal(result, expected)
- result = IntervalIndex.from_intervals(result.values)
+ result = IntervalIndex(result.values)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('name', [None, 'foo'])
diff --git a/pandas/tests/reshape/test_tile.py b/pandas/tests/reshape/test_tile.py
index 48f25112b45cf..f7262a2f0da63 100644
--- a/pandas/tests/reshape/test_tile.py
+++ b/pandas/tests/reshape/test_tile.py
@@ -236,7 +236,7 @@ def test_qcut_include_lowest(self):
ii = qcut(values, 4)
- ex_levels = IntervalIndex.from_intervals(
+ ex_levels = IntervalIndex(
[Interval(-0.001, 2.25),
Interval(2.25, 4.5),
Interval(4.5, 6.75),
@@ -333,8 +333,7 @@ def test_series_retbins(self):
def test_qcut_duplicates_bin(self):
# GH 7751
values = [0, 0, 0, 0, 1, 2, 3]
- expected = IntervalIndex.from_intervals([Interval(-0.001, 1),
- Interval(1, 3)])
+ expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)])
result = qcut(values, 3, duplicates='drop')
tm.assert_index_equal(result.categories, expected)
@@ -447,7 +446,7 @@ def test_datetime_cut(self):
result, bins = cut(data, 3, retbins=True)
expected = (
- Series(IntervalIndex.from_intervals([
+ Series(IntervalIndex([
Interval(Timestamp('2012-12-31 23:57:07.200000'),
Timestamp('2013-01-01 16:00:00')),
Interval(Timestamp('2013-01-01 16:00:00'),
@@ -480,7 +479,7 @@ def test_datetime_bin(self):
data = [np.datetime64('2012-12-13'), np.datetime64('2012-12-15')]
bin_data = ['2012-12-12', '2012-12-14', '2012-12-16']
expected = (
- Series(IntervalIndex.from_intervals([
+ Series(IntervalIndex([
Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])),
Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2]))]))
.astype(CDT(ordered=True)))
| - [X] closes #19262
- [X] closes #19263
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
Summary:
- Added support for a `dtype` parameter to all IntervalIndex constructors
- Allows users to override the inferred dtype
- Deprecated `IntervalIndex.from_intervals`
- Still added `dtype` parameter, since it's just a pass through to `IntervalIndex`
- Removed usage and references to `IntervalIndex.from_intervals` throughout the codebase
- Split construction tests off into `interval/test_construction.py`
- Created a base class for common tests, and subclasses for each constructor
- Was previously written in a more flat style, where each constructor was explicitly called
- Expanded the tests to hit some previously untested behavior | https://api.github.com/repos/pandas-dev/pandas/pulls/19339 | 2018-01-22T03:53:42Z | 2018-01-25T01:29:03Z | 2018-01-25T01:29:02Z | 2018-01-25T01:31:16Z |
remove BlockManager.reindex | diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 5c3481ed6d4ff..c2d3d0852384c 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -4407,42 +4407,6 @@ def _blklocs(self):
""" compat with BlockManager """
return None
- def reindex(self, new_axis, indexer=None, method=None, fill_value=None,
- limit=None, copy=True):
- # if we are the same and don't copy, just return
- if self.index.equals(new_axis):
- if copy:
- return self.copy(deep=True)
- else:
- return self
-
- values = self._block.get_values()
-
- if indexer is None:
- indexer = self.items.get_indexer_for(new_axis)
-
- if fill_value is None:
- fill_value = np.nan
-
- new_values = algos.take_1d(values, indexer, fill_value=fill_value)
-
- # fill if needed
- if method is not None or limit is not None:
- new_values = missing.interpolate_2d(new_values,
- method=method,
- limit=limit,
- fill_value=fill_value)
-
- if self._block.is_sparse:
- make_block = self._block.make_block_same_class
-
- block = make_block(new_values, copy=copy,
- placement=slice(0, len(new_axis)))
-
- mgr = SingleBlockManager(block, new_axis)
- mgr._consolidate_inplace()
- return mgr
-
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
diff --git a/pandas/core/series.py b/pandas/core/series.py
index b7dcc48599f37..a14eb69d86377 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -197,8 +197,13 @@ def __init__(self, data=None, index=None, dtype=None, name=None,
elif isinstance(data, SingleBlockManager):
if index is None:
index = data.index
- else:
- data = data.reindex(index, copy=copy)
+ elif not data.index.equals(index) or copy:
+ # GH#19275 SingleBlockManager input should only be called
+ # internally
+ raise AssertionError('Cannot pass both SingleBlockManager '
+ '`data` argument and a different '
+ '`index` argument. `copy` must '
+ 'be False.')
elif isinstance(data, Categorical):
# GH12574: Allow dtype=category only, otherwise error
if ((dtype is not None) and
diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py
index 4b649927f8f72..2106fdf8e5e8e 100644
--- a/pandas/core/sparse/series.py
+++ b/pandas/core/sparse/series.py
@@ -166,9 +166,13 @@ def __init__(self, data=None, index=None, sparse_index=None, kind='block',
data = data.astype(dtype)
if index is None:
index = data.index.view()
- else:
-
- data = data.reindex(index, copy=False)
+ elif not data.index.equals(index) or copy: # pragma: no cover
+ # GH#19275 SingleBlockManager input should only be called
+ # internally
+ raise AssertionError('Cannot pass both SingleBlockManager '
+ '`data` argument and a different '
+ '`index` argument. `copy` must '
+ 'be False.')
else:
length = len(index)
| - [x] closes #19275
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19338 | 2018-01-22T03:32:00Z | 2018-01-24T01:23:36Z | 2018-01-24T01:23:35Z | 2018-02-11T21:59:30Z |
separate numeric tests so we can isolate division by zero | diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index 05ccb25960b1f..554b3e15d8f10 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -596,77 +596,81 @@ def test_divide_decimal(self):
assert_series_equal(expected, s)
- def test_div(self):
+ @pytest.mark.parametrize(
+ 'dtype2',
+ [
+ np.int64, np.int32, np.int16, np.int8,
+ np.float64, np.float32, np.float16,
+ np.uint64, np.uint32,
+ np.uint16, np.uint8
+ ])
+ @pytest.mark.parametrize('dtype1', [np.int64, np.float64, np.uint64])
+ def test_ser_div_ser(self, dtype1, dtype2):
+ # no longer do integer div for any ops, but deal with the 0's
+ first = Series([3, 4, 5, 8], name='first').astype(dtype1)
+ second = Series([0, 0, 0, 3], name='second').astype(dtype2)
+
with np.errstate(all='ignore'):
- # no longer do integer div for any ops, but deal with the 0's
- p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
- result = p['first'] / p['second']
- expected = Series(
- p['first'].values.astype(float) / p['second'].values,
- dtype='float64')
- expected.iloc[0:3] = np.inf
- assert_series_equal(result, expected)
+ expected = Series(first.values.astype(np.float64) / second.values,
+ dtype='float64', name=None)
+ expected.iloc[0:3] = np.inf
- result = p['first'] / 0
- expected = Series(np.inf, index=p.index, name='first')
- assert_series_equal(result, expected)
+ result = first / second
+ assert_series_equal(result, expected)
+ assert not result.equals(second / first)
- p = p.astype('float64')
- result = p['first'] / p['second']
- expected = Series(p['first'].values / p['second'].values)
- assert_series_equal(result, expected)
+ def test_div_equiv_binop(self):
+ # Test Series.div as well as Series.__div__
+ # float/integer issue
+ # GH#7785
+ first = pd.Series([1, 0], name='first')
+ second = pd.Series([-0.01, -0.02], name='second')
+ expected = Series([-0.01, -np.inf])
- p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
- result = p['first'] / p['second']
- assert_series_equal(result, p['first'].astype('float64'),
- check_names=False)
- assert result.name is None
- assert not result.equals(p['second'] / p['first'])
-
- # inf signing
- s = Series([np.nan, 1., -1.])
- result = s / 0
- expected = Series([np.nan, np.inf, -np.inf])
- assert_series_equal(result, expected)
+ result = second.div(first)
+ assert_series_equal(result, expected, check_names=False)
- # float/integer issue
- # GH 7785
- p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
- expected = Series([-0.01, -np.inf])
+ result = second / first
+ assert_series_equal(result, expected)
- result = p['second'].div(p['first'])
- assert_series_equal(result, expected, check_names=False)
+ def test_rdiv_zero_compat(self):
+ # GH#8674
+ zero_array = np.array([0] * 5)
+ data = np.random.randn(5)
+ expected = pd.Series([0.] * 5)
- result = p['second'] / p['first']
- assert_series_equal(result, expected)
+ result = zero_array / pd.Series(data)
+ assert_series_equal(result, expected)
- # GH 9144
- s = Series([-1, 0, 1])
+ result = pd.Series(zero_array) / data
+ assert_series_equal(result, expected)
- result = 0 / s
- expected = Series([0.0, nan, 0.0])
- assert_series_equal(result, expected)
+ result = pd.Series(zero_array) / pd.Series(data)
+ assert_series_equal(result, expected)
- result = s / 0
- expected = Series([-inf, nan, inf])
- assert_series_equal(result, expected)
+ def test_div_zero_inf_signs(self):
+ # GH#9144, inf signing
+ ser = Series([-1, 0, 1], name='first')
+ expected = Series([-np.inf, np.nan, np.inf], name='first')
- result = s // 0
- expected = Series([-inf, nan, inf])
- assert_series_equal(result, expected)
+ result = ser / 0
+ assert_series_equal(result, expected)
- # GH 8674
- zero_array = np.array([0] * 5)
- data = np.random.randn(5)
- expected = pd.Series([0.] * 5)
- result = zero_array / pd.Series(data)
- assert_series_equal(result, expected)
+ def test_rdiv_zero(self):
+ # GH#9144
+ ser = Series([-1, 0, 1], name='first')
+ expected = Series([0.0, np.nan, 0.0], name='first')
- result = pd.Series(zero_array) / data
- assert_series_equal(result, expected)
+ result = 0 / ser
+ assert_series_equal(result, expected)
- result = pd.Series(zero_array) / pd.Series(data)
- assert_series_equal(result, expected)
+ def test_floordiv_div(self):
+ # GH#9144
+ ser = Series([-1, 0, 1], name='first')
+
+ result = ser // 0
+ expected = Series([-inf, nan, inf], name='first')
+ assert_series_equal(result, expected)
class TestTimedeltaSeriesArithmeticWithIntegers(object):
@@ -1576,33 +1580,42 @@ def test_dt64_series_add_intlike(self, tz):
class TestSeriesOperators(TestData):
- def test_op_method(self):
- def check(series, other, check_reverse=False):
- simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
- if not compat.PY3:
- simple_ops.append('div')
-
- for opname in simple_ops:
- op = getattr(Series, opname)
-
- if op == 'div':
- alt = operator.truediv
- else:
- alt = getattr(operator, opname)
-
- result = op(series, other)
- expected = alt(series, other)
- assert_almost_equal(result, expected)
- if check_reverse:
- rop = getattr(Series, "r" + opname)
- result = rop(series, other)
- expected = alt(other, series)
- assert_almost_equal(result, expected)
+ @pytest.mark.parametrize(
+ 'ts',
+ [
+ (lambda x: x, lambda x: x * 2, False),
+ (lambda x: x, lambda x: x[::2], False),
+ (lambda x: x, lambda x: 5, True),
+ (lambda x: tm.makeFloatSeries(),
+ lambda x: tm.makeFloatSeries(),
+ True)
+ ])
+ @pytest.mark.parametrize('opname', ['add', 'sub', 'mul', 'floordiv',
+ 'truediv', 'div', 'pow'])
+ def test_op_method(self, opname, ts):
+ # check that Series.{opname} behaves like Series.__{opname}__,
+ series = ts[0](self.ts)
+ other = ts[1](self.ts)
+ check_reverse = ts[2]
+
+ if opname == 'div' and compat.PY3:
+ pytest.skip('div test only for Py3')
+
+ op = getattr(Series, opname)
+
+ if op == 'div':
+ alt = operator.truediv
+ else:
+ alt = getattr(operator, opname)
- check(self.ts, self.ts * 2)
- check(self.ts, self.ts[::2])
- check(self.ts, 5, check_reverse=True)
- check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
+ result = op(series, other)
+ expected = alt(series, other)
+ assert_almost_equal(result, expected)
+ if check_reverse:
+ rop = getattr(Series, "r" + opname)
+ result = rop(series, other)
+ expected = alt(other, series)
+ assert_almost_equal(result, expected)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
@@ -1971,20 +1984,15 @@ def test_operators_corner(self):
index=self.ts.index[:-5], name='ts')
tm.assert_series_equal(added[:-5], expected)
- def test_operators_reverse_object(self):
+ @pytest.mark.parametrize('op', [operator.add, operator.sub, operator.mul,
+ operator.truediv, operator.floordiv])
+ def test_operators_reverse_object(self, op):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10), dtype=object)
- def _check_op(arr, op):
- result = op(1., arr)
- expected = op(1., arr.astype(float))
- assert_series_equal(result.astype(float), expected)
-
- _check_op(arr, operator.add)
- _check_op(arr, operator.sub)
- _check_op(arr, operator.mul)
- _check_op(arr, operator.truediv)
- _check_op(arr, operator.floordiv)
+ result = op(1., arr)
+ expected = op(1., arr.astype(float))
+ assert_series_equal(result.astype(float), expected)
def test_arith_ops_df_compat(self):
# GH 1134
| The upcoming fix(es) for #19322 are going to involve parametrizing a bunch of variants of division by zero. This separates out the existing test cases into method-specific tests, some of which will be parametrized in upcoming PRs.
This PR does not change the aggregate contents of the tests. | https://api.github.com/repos/pandas-dev/pandas/pulls/19336 | 2018-01-22T00:21:09Z | 2018-02-08T11:28:55Z | 2018-02-08T11:28:55Z | 2018-02-08T14:14:13Z |
DOC: organize 0.23 bug fix whatsnew | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index fe5342c520196..ad0f4bdbcbac2 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -398,67 +398,82 @@ Bug Fixes
~~~~~~~~~
-Conversion
-^^^^^^^^^^
+Datetimelike
+^^^^^^^^^^^^
-- Bug in :class:`Index` constructor with ``dtype='uint64'`` where int-like floats were not coerced to :class:`UInt64Index` (:issue:`18400`)
-- Bug in the :class:`DataFrame` constructor in which data containing very large positive or very large negative numbers was causing ``OverflowError`` (:issue:`18584`)
-- Fixed a bug where creating a Series from an array that contains both tz-naive and tz-aware values will result in a Series whose dtype is tz-aware instead of object (:issue:`16406`)
+- Bug in :func:`Series.__sub__` subtracting a non-nanosecond ``np.datetime64`` object from a ``Series`` gave incorrect results (:issue:`7996`)
+- Bug in :class:`DatetimeIndex`, :class:`TimedeltaIndex` addition and subtraction of zero-dimensional integer arrays gave incorrect results (:issue:`19012`)
+- Bug in :func:`Series.__add__` adding Series with dtype ``timedelta64[ns]`` to a timezone-aware ``DatetimeIndex`` incorrectly dropped timezone information (:issue:`13905`)
+- Bug in :func:`Timedelta.__floordiv__` and :func:`Timedelta.__rfloordiv__` dividing by many incompatible numpy objects was incorrectly allowed (:issue:`18846`)
- Adding a ``Period`` object to a ``datetime`` or ``Timestamp`` object will now correctly raise a ``TypeError`` (:issue:`17983`)
-- Fixed a bug where ``FY5253`` date offsets could incorrectly raise an ``AssertionError`` in arithmetic operatons (:issue:`14774`)
-- Bug in :meth:`Index.astype` with a categorical dtype where the resultant index is not converted to a :class:`CategoricalIndex` for all types of index (:issue:`18630`)
-- Bug in :meth:`Series.astype` and ``Categorical.astype()`` where an existing categorical data does not get updated (:issue:`10696`, :issue:`18593`)
-- Bug in :class:`Series` constructor with an int or float list where specifying ``dtype=str``, ``dtype='str'`` or ``dtype='U'`` failed to convert the data elements to strings (:issue:`16605`)
- Bug in :class:`Timestamp` where comparison with an array of ``Timestamp`` objects would result in a ``RecursionError`` (:issue:`15183`)
-- Bug in :class:`WeekOfMonth` and class:`Week` where addition and subtraction did not roll correctly (:issue:`18510`,:issue:`18672`,:issue:`18864`)
-- Bug in :meth:`DatetimeIndex.astype` when converting between timezone aware dtypes, and converting from timezone aware to naive (:issue:`18951`)
-- Bug in :class:`FY5253` where ``datetime`` addition and subtraction incremented incorrectly for dates on the year-end but not normalized to midnight (:issue:`18854`)
- Bug in :class:`DatetimeIndex` and :class:`TimedeltaIndex` where adding or subtracting an array-like of ``DateOffset`` objects either raised (``np.array``, ``pd.Index``) or broadcast incorrectly (``pd.Series``) (:issue:`18849`)
- Bug in :class:`Series` floor-division where operating on a scalar ``timedelta`` raises an exception (:issue:`18846`)
-- Bug in :class:`FY5253Quarter`, :class:`LastWeekOfMonth` where rollback and rollforward behavior was inconsistent with addition and subtraction behavior (:issue:`18854`)
-- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`)
-- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`)
- Bug in :class:`Series`` with ``dtype='timedelta64[ns]`` where addition or subtraction of ``TimedeltaIndex`` had results cast to ``dtype='int64'`` (:issue:`17250`)
- Bug in :class:`TimedeltaIndex` where division by a ``Series`` would return a ``TimedeltaIndex`` instead of a ``Series`` (issue:`19042`)
- Bug in :class:`Series` with ``dtype='timedelta64[ns]`` where addition or subtraction of ``TimedeltaIndex`` could return a ``Series`` with an incorrect name (issue:`19043`)
-- Fixed bug where comparing :class:`DatetimeIndex` failed to raise ``TypeError`` when attempting to compare timezone-aware and timezone-naive datetimelike objects (:issue:`18162`)
- Bug in :class:`DatetimeIndex` where the repr was not showing high-precision time values at the end of a day (e.g., 23:59:59.999999999) (:issue:`19030`)
- Bug where dividing a scalar timedelta-like object with :class:`TimedeltaIndex` performed the reciprocal operation (:issue:`19125`)
-- Bug in :class:`WeekOfMonth` and :class:`LastWeekOfMonth` where default keyword arguments for constructor raised ``ValueError`` (:issue:`19142`)
+- Bug in ``.astype()`` to non-ns timedelta units would hold the incorrect dtype (:issue:`19176`, :issue:`19223`, :issue:`12425`)
+- Bug in subtracting :class:`Series` from ``NaT`` incorrectly returning ``NaT`` (:issue:`19158`)
+- Bug in :func:`Series.truncate` which raises ``TypeError`` with a monotonic ``PeriodIndex`` (:issue:`17717`)
+
+Timezones
+^^^^^^^^^
+
+- Bug in creating a ``Series`` from an array that contains both tz-naive and tz-aware values will result in a ``Series`` whose dtype is tz-aware instead of object (:issue:`16406`)
+- Bug in comparison of timezone-aware :class:`DatetimeIndex` against ``NaT`` incorrectly raising ``TypeError`` (:issue:`19276`)
+- Bug in :meth:`DatetimeIndex.astype` when converting between timezone aware dtypes, and converting from timezone aware to naive (:issue:`18951`)
+- Bug in comparing :class:`DatetimeIndex`, which failed to raise ``TypeError`` when attempting to compare timezone-aware and timezone-naive datetimelike objects (:issue:`18162`)
- Bug in localization of a naive, datetime string in a ``Series`` constructor with a ``datetime64[ns, tz]`` dtype (:issue:`174151`)
- :func:`Timestamp.replace` will now handle Daylight Savings transitions gracefully (:issue:`18319`)
-- Bug in :class:`Index` multiplication and division methods where operating with a ``Series`` would return an ``Index`` object instead of a ``Series`` object (:issue:`19042`)
+- Bug in tz-aware :class:`DatetimeIndex` where addition/subtraction with a :class:`TimedeltaIndex` or array with ``dtype='timedelta64[ns]'`` was incorrect (:issue:`17558`)
+- Bug in :func:`DatetimeIndex.insert` where inserting ``NaT`` into a timezone-aware index incorrectly raised (:issue:`16357`)
+
+Offsets
+^^^^^^^
+
+- Bug in :class:`WeekOfMonth` and class:`Week` where addition and subtraction did not roll correctly (:issue:`18510`,:issue:`18672`,:issue:`18864`)
+- Bug in :class:`WeekOfMonth` and :class:`LastWeekOfMonth` where default keyword arguments for constructor raised ``ValueError`` (:issue:`19142`)
+- Bug in :class:`FY5253Quarter`, :class:`LastWeekOfMonth` where rollback and rollforward behavior was inconsistent with addition and subtraction behavior (:issue:`18854`)
+- Bug in :class:`FY5253` where ``datetime`` addition and subtraction incremented incorrectly for dates on the year-end but not normalized to midnight (:issue:`18854`)
+- Bug in :class:`FY5253` where date offsets could incorrectly raise an ``AssertionError`` in arithmetic operatons (:issue:`14774`)
+Numeric
+^^^^^^^
+- Bug in :class:`Series` constructor with an int or float list where specifying ``dtype=str``, ``dtype='str'`` or ``dtype='U'`` failed to convert the data elements to strings (:issue:`16605`)
+- Bug in :class:`Index` multiplication and division methods where operating with a ``Series`` would return an ``Index`` object instead of a ``Series`` object (:issue:`19042`)
+- Bug in the :class:`DataFrame` constructor in which data containing very large positive or very large negative numbers was causing ``OverflowError`` (:issue:`18584`)
+- Bug in :class:`Index` constructor with ``dtype='uint64'`` where int-like floats were not coerced to :class:`UInt64Index` (:issue:`18400`)
+
-
-- Bug in ``.astype()`` to non-ns timedelta units would hold the incorrect dtype (:issue:`19176`, :issue:`19223`, :issue:`12425`)
-- Bug in subtracting :class:`Series` from ``NaT`` incorrectly returning ``NaT`` (:issue:`19158`)
-- Bug in comparison of timezone-aware :class:`DatetimeIndex` against ``NaT`` incorrectly raising ``TypeError`` (:issue:`19276`)
+
Indexing
^^^^^^^^
-- Bug in :func:`Series.truncate` which raises ``TypeError`` with a monotonic ``PeriodIndex`` (:issue:`17717`)
-- Bug in :func:`DataFrame.groupby` where tuples were interpreted as lists of keys rather than as keys (:issue:`17979`, :issue:`18249`)
-- Bug in :func:`MultiIndex.get_level_values` which would return an invalid index on level of ints with missing values (:issue:`17924`)
-- Bug in :func:`MultiIndex.remove_unused_levels` which would fill nan values (:issue:`18417`)
-- Bug in :func:`MultiIndex.from_tuples`` which would fail to take zipped tuples in python3 (:issue:`18434`)
- Bug in :class:`Index` construction from list of mixed type tuples (:issue:`18505`)
- Bug in :func:`Index.drop` when passing a list of both tuples and non-tuples (:issue:`18304`)
-- Bug in :class:`IntervalIndex` where empty and purely NA data was constructed inconsistently depending on the construction method (:issue:`18421`)
-- Bug in :func:`IntervalIndex.symmetric_difference` where the symmetric difference with a non-``IntervalIndex`` did not raise (:issue:`18475`)
+- Bug in :meth:`~DataFrame.drop`, :meth:`~Panel.drop`, :meth:`~Series.drop`, :meth:`~Index.drop` where no ``KeyError`` is raised when dropping a non-existent element from an axis that contains duplicates (:issue:`19186`)
- Bug in indexing a datetimelike ``Index`` that raised ``ValueError`` instead of ``IndexError`` (:issue:`18386`).
-- Bug in tz-aware :class:`DatetimeIndex` where addition/subtraction with a :class:`TimedeltaIndex` or array with ``dtype='timedelta64[ns]'`` was incorrect (:issue:`17558`)
- :func:`Index.to_series` now accepts ``index`` and ``name`` kwargs (:issue:`18699`)
- :func:`DatetimeIndex.to_series` now accepts ``index`` and ``name`` kwargs (:issue:`18699`)
- Bug in indexing non-scalar value from ``Series`` having non-unique ``Index`` will return value flattened (:issue:`17610`)
-- Bug in :func:`DatetimeIndex.insert` where inserting ``NaT`` into a timezone-aware index incorrectly raised (:issue:`16357`)
- Bug in ``__setitem__`` when indexing a :class:`DataFrame` with a 2-d boolean ndarray (:issue:`18582`)
-- Bug in :func:`MultiIndex.__contains__` where non-tuple keys would return ``True`` even if they had been dropped (:issue:`19027`)
-- Bug in :func:`MultiIndex.set_labels` which would cause casting (and potentially clipping) of the new labels if the ``level`` argument is not 0 or a list like [0, 1, ... ] (:issue:`19057`)
- Bug in ``str.extractall`` when there were no matches empty :class:`Index` was returned instead of appropriate :class:`MultiIndex` (:issue:`19034`)
+- Bug in :class:`IntervalIndex` where empty and purely NA data was constructed inconsistently depending on the construction method (:issue:`18421`)
+- Bug in :func:`IntervalIndex.symmetric_difference` where the symmetric difference with a non-``IntervalIndex`` did not raise (:issue:`18475`)
- Bug in :class:`IntervalIndex` where set operations that returned an empty ``IntervalIndex`` had the wrong dtype (:issue:`19101`)
-- Bug in :meth:`~DataFrame.drop`, :meth:`~Panel.drop`, :meth:`~Series.drop`, :meth:`~Index.drop` where no ``KeyError`` is raised when dropping a non-existent element from an axis that contains duplicates (:issue:`19186`)
+
+MultiIndex
+^^^^^^^^^^
+
+- Bug in :func:`MultiIndex.__contains__` where non-tuple keys would return ``True`` even if they had been dropped (:issue:`19027`)
+- Bug in :func:`MultiIndex.set_labels` which would cause casting (and potentially clipping) of the new labels if the ``level`` argument is not 0 or a list like [0, 1, ... ] (:issue:`19057`)
+- Bug in :func:`MultiIndex.get_level_values` which would return an invalid index on level of ints with missing values (:issue:`17924`)
+- Bug in :func:`MultiIndex.remove_unused_levels` which would fill nan values (:issue:`18417`)
+- Bug in :func:`MultiIndex.from_tuples`` which would fail to take zipped tuples in python3 (:issue:`18434`)
-
I/O
@@ -488,6 +503,7 @@ Groupby/Resample/Rolling
- Bug when grouping by a single column and aggregating with a class like ``list`` or ``tuple`` (:issue:`18079`)
- Fixed regression in :func:`DataFrame.groupby` which would not emit an error when called with a tuple key not in the index (:issue:`18798`)
- Bug in :func:`DataFrame.resample` which silently ignored unsupported (or mistyped) options for ``label``, ``closed`` and ``convention`` (:issue:`19303`)
+- Bug in :func:`DataFrame.groupby` where tuples were interpreted as lists of keys rather than as keys (:issue:`17979`, :issue:`18249`)
-
Sparse
@@ -512,14 +528,6 @@ Reshaping
- Bug in timezone comparisons, manifesting as a conversion of the index to UTC in ``.concat()`` (:issue:`18523`)
-
-Numeric
-^^^^^^^
-
-- Bug in :func:`Series.__sub__` subtracting a non-nanosecond ``np.datetime64`` object from a ``Series`` gave incorrect results (:issue:`7996`)
-- Bug in :class:`DatetimeIndex`, :class:`TimedeltaIndex` addition and subtraction of zero-dimensional integer arrays gave incorrect results (:issue:`19012`)
-- Bug in :func:`Series.__add__` adding Series with dtype ``timedelta64[ns]`` to a timezone-aware ``DatetimeIndex`` incorrectly dropped timezone information (:issue:`13905`)
-- Bug in :func:`Timedelta.__floordiv__` and :func:`Timedelta.__rfloordiv__` dividing by many incompatible numpy objects was incorrectly allowed (:issue:`18846`)
--
Categorical
^^^^^^^^^^^
@@ -529,6 +537,9 @@ Categorical
when all the categoricals had the same categories, but in a different order.
This affected :func:`pandas.concat` with Categorical data (:issue:`19096`).
- Bug in ``Categorical.equals`` between two unordered categories with the same categories, but in a different order (:issue:`16603`)
+- Bug in :meth:`Index.astype` with a categorical dtype where the resultant index is not converted to a :class:`CategoricalIndex` for all types of index (:issue:`18630`)
+- Bug in :meth:`Series.astype` and ``Categorical.astype()`` where an existing categorical data does not get updated (:issue:`10696`, :issue:`18593`)
+- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`)
-
Other
| https://api.github.com/repos/pandas-dev/pandas/pulls/19335 | 2018-01-21T22:27:38Z | 2018-01-21T22:28:07Z | 2018-01-21T22:28:07Z | 2018-01-21T22:28:07Z | |
Split test_ops_compat into targeted tests | diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py
index 1a6aabc2f258f..ef6523a9eb270 100644
--- a/pandas/tests/indexes/timedeltas/test_arithmetic.py
+++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py
@@ -442,39 +442,36 @@ def test_tdi_floordiv_timedelta_scalar(self, scalar_td):
res = tdi // (scalar_td)
tm.assert_index_equal(res, expected)
- # TODO: Split by operation, better name
- def test_ops_compat(self):
+ def test_tdi_floordiv_tdlike_scalar(self, delta):
+ tdi = timedelta_range('1 days', '10 days', name='foo')
+ expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
- offsets = [pd.offsets.Hour(2), timedelta(hours=2),
- np.timedelta64(2, 'h'), Timedelta(hours=2)]
+ result = tdi // delta
+ tm.assert_index_equal(result, expected, exact=False)
+ def test_tdi_mul_tdlike_scalar_raises(self, delta):
rng = timedelta_range('1 days', '10 days', name='foo')
+ with pytest.raises(TypeError):
+ rng * delta
- # multiply
- for offset in offsets:
- pytest.raises(TypeError, lambda: rng * offset)
+ def test_tdi_div_nat_raises(self):
+ # don't allow division by NaT (make could in the future)
+ rng = timedelta_range('1 days', '10 days', name='foo')
+ with pytest.raises(TypeError):
+ rng / pd.NaT
- # divide
+ def test_tdi_div_tdlike_scalar(self, delta):
+ rng = timedelta_range('1 days', '10 days', name='foo')
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
- for offset in offsets:
- result = rng / offset
- tm.assert_index_equal(result, expected, exact=False)
- # floor divide
- expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
- for offset in offsets:
- result = rng // offset
- tm.assert_index_equal(result, expected, exact=False)
+ result = rng / delta
+ tm.assert_index_equal(result, expected, exact=False)
- # divide with nats
+ def test_tdi_div_tdlike_scalar_with_nat(self, delta):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
- for offset in offsets:
- result = rng / offset
- tm.assert_index_equal(result, expected)
-
- # don't allow division by NaT (make could in the future)
- pytest.raises(TypeError, lambda: rng / pd.NaT)
+ result = rng / delta
+ tm.assert_index_equal(result, expected)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
| This and a couple other test-organizing PRs are being split off of a WIP branch to fix division-by-zero consistency.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19334 | 2018-01-21T22:05:37Z | 2018-01-22T11:14:47Z | 2018-01-22T11:14:47Z | 2018-02-11T21:58:49Z |
Fix Index __mul__-like ops with timedelta scalars | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index a4b943f995a33..7e03449f2ac57 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -744,6 +744,7 @@ Timedelta
- Bug in :func:`Timedelta.__floordiv__`, :func:`Timedelta.__rfloordiv__` where operating with a ``Tick`` object would raise a ``TypeError`` instead of returning a numeric value (:issue:`19738`)
- Bug in :func:`Period.asfreq` where periods near ``datetime(1, 1, 1)`` could be converted incorrectly (:issue:`19643`)
- Bug in :func:`Timedelta.total_seconds()` causing precision errors i.e. ``Timedelta('30S').total_seconds()==30.000000000000004`` (:issue:`19458`)
+- Multiplication of :class:`TimedeltaIndex` by ``TimedeltaIndex`` will now raise ``TypeError`` instead of raising ``ValueError`` in cases of length mis-match (:issue`19333`)
-
Timezones
@@ -778,6 +779,7 @@ Numeric
- Bug in the :class:`DataFrame` constructor in which data containing very large positive or very large negative numbers was causing ``OverflowError`` (:issue:`18584`)
- Bug in :class:`Index` constructor with ``dtype='uint64'`` where int-like floats were not coerced to :class:`UInt64Index` (:issue:`18400`)
- Bug in :class:`DataFrame` flex arithmetic (e.g. ``df.add(other, fill_value=foo)``) with a ``fill_value`` other than ``None`` failed to raise ``NotImplementedError`` in corner cases where either the frame or ``other`` has length zero (:issue:`19522`)
+- Multiplication and division of numeric-dtyped :class:`Index` objects with timedelta-like scalars returns ``TimedeltaIndex`` instead of raising ``TypeError`` (:issue:`19333`)
Indexing
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 7dfa34bd634ad..59fe4bba649d3 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -5,7 +5,7 @@
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
- Timestamp)
+ Timestamp, Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
@@ -16,7 +16,7 @@
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
- ABCPeriodIndex,
+ ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
@@ -3918,7 +3918,21 @@ def dropna(self, how='any'):
return self._shallow_copy()
def _evaluate_with_timedelta_like(self, other, op, opstr, reversed=False):
- raise TypeError("can only perform ops with timedelta like values")
+ # Timedelta knows how to operate with np.array, so dispatch to that
+ # operation and then wrap the results
+ other = Timedelta(other)
+ values = self.values
+ if reversed:
+ values, other = other, values
+
+ with np.errstate(all='ignore'):
+ result = op(values, other)
+
+ attrs = self._get_attributes_dict()
+ attrs = self._maybe_update_attributes(attrs)
+ if op == divmod:
+ return Index(result[0], **attrs), Index(result[1], **attrs)
+ return Index(result, **attrs)
def _evaluate_with_datetime_like(self, other, op, opstr):
raise TypeError("can only perform ops with datetime like values")
@@ -4061,6 +4075,9 @@ def _make_evaluate_binop(op, opstr, reversed=False, constructor=Index):
def _evaluate_numeric_binop(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
+ elif isinstance(other, ABCTimedeltaIndex):
+ # Defer to subclass implementation
+ return NotImplemented
other = self._validate_for_numeric_binop(other, op, opstr)
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 0ed92a67c7e14..0ac415ee0b701 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -1,5 +1,6 @@
from sys import getsizeof
import operator
+from datetime import timedelta
import numpy as np
from pandas._libs import index as libindex
@@ -8,7 +9,7 @@
is_integer,
is_scalar,
is_int64_dtype)
-from pandas.core.dtypes.generic import ABCSeries
+from pandas.core.dtypes.generic import ABCSeries, ABCTimedeltaIndex
from pandas import compat
from pandas.compat import lrange, range, get_range_parameters
@@ -587,6 +588,15 @@ def _make_evaluate_binop(op, opstr, reversed=False, step=False):
def _evaluate_numeric_binop(self, other):
if isinstance(other, ABCSeries):
return NotImplemented
+ elif isinstance(other, ABCTimedeltaIndex):
+ # Defer to TimedeltaIndex implementation
+ return NotImplemented
+ elif isinstance(other, (timedelta, np.timedelta64)):
+ # GH#19333 is_integer evaluated True on timedelta64,
+ # so we need to catch these explicitly
+ if reversed:
+ return op(other, self._int64index)
+ return op(self._int64index, other)
other = self._validate_for_numeric_binop(other, op, opstr)
attrs = self._get_attributes_dict()
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index c6883df7ee91a..bafb6ae2e45f4 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -13,7 +13,7 @@
import pandas.util.testing as tm
import pandas as pd
-from pandas._libs.tslib import Timestamp
+from pandas._libs.tslib import Timestamp, Timedelta
from pandas.tests.indexes.common import Base
@@ -26,6 +26,42 @@ def full_like(array, value):
return ret
+class TestIndexArithmeticWithTimedeltaScalar(object):
+
+ @pytest.mark.parametrize('index', [
+ Int64Index(range(1, 11)),
+ UInt64Index(range(1, 11)),
+ Float64Index(range(1, 11)),
+ RangeIndex(1, 11)])
+ @pytest.mark.parametrize('scalar_td', [Timedelta(days=1),
+ Timedelta(days=1).to_timedelta64(),
+ Timedelta(days=1).to_pytimedelta()])
+ def test_index_mul_timedelta(self, scalar_td, index):
+ # GH#19333
+ expected = pd.timedelta_range('1 days', '10 days')
+
+ result = index * scalar_td
+ tm.assert_index_equal(result, expected)
+ commute = scalar_td * index
+ tm.assert_index_equal(commute, expected)
+
+ @pytest.mark.parametrize('index', [Int64Index(range(1, 3)),
+ UInt64Index(range(1, 3)),
+ Float64Index(range(1, 3)),
+ RangeIndex(1, 3)])
+ @pytest.mark.parametrize('scalar_td', [Timedelta(days=1),
+ Timedelta(days=1).to_timedelta64(),
+ Timedelta(days=1).to_pytimedelta()])
+ def test_index_rdiv_timedelta(self, scalar_td, index):
+ expected = pd.TimedeltaIndex(['1 Day', '12 Hours'])
+
+ result = scalar_td / index
+ tm.assert_index_equal(result, expected)
+
+ with pytest.raises(TypeError):
+ index / scalar_td
+
+
class Numeric(Base):
def test_numeric_compat(self):
diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py
index 3dc60ed33b958..c6e5b477a2a06 100644
--- a/pandas/tests/indexes/timedeltas/test_arithmetic.py
+++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py
@@ -351,7 +351,7 @@ def test_dti_mul_dti_raises(self):
def test_dti_mul_too_short_raises(self):
idx = self._holder(np.arange(5, dtype='int64'))
- with pytest.raises(ValueError):
+ with pytest.raises(TypeError):
idx * self._holder(np.arange(3))
with pytest.raises(ValueError):
idx * np.array([1, 2])
@@ -527,6 +527,20 @@ def test_tdi_div_tdlike_scalar_with_nat(self, delta):
result = rng / delta
tm.assert_index_equal(result, expected)
+ @pytest.mark.parametrize('other', [np.arange(1, 11),
+ pd.Int64Index(range(1, 11)),
+ pd.UInt64Index(range(1, 11)),
+ pd.Float64Index(range(1, 11)),
+ pd.RangeIndex(1, 11)])
+ def test_tdi_rmul_arraylike(self, other):
+ tdi = TimedeltaIndex(['1 Day'] * 10)
+ expected = timedelta_range('1 days', '10 days')
+
+ result = other * tdi
+ tm.assert_index_equal(result, expected)
+ commute = tdi * other
+ tm.assert_index_equal(commute, expected)
+
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
| Fixes the following current behavior for each of the numeric index classes and each of the basic timedelta-like scalars:
```
idx = pd.Index(range(3))
td = pd.Timedelta(days=1)
>>> idx * td
TypeError: can only perform ops with timedelta like values
>>> td * idx
TypeError: can only perform ops with timedelta like values
>>> td / idx
TypeError: can only perform ops with timedelta like values
>>> td // idx
TypeError: can only perform ops with timedelta like values
```
- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19333 | 2018-01-21T21:46:55Z | 2018-02-22T01:37:34Z | 2018-02-22T01:37:34Z | 2018-02-22T05:45:23Z |
Refactor test_parquet.py to use check_round_trip at module level | diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index d472a5ed23c75..8a6a22abe23fa 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -110,48 +110,79 @@ def df_full():
pd.Timestamp('20130103')]})
-def test_invalid_engine(df_compat):
+def check_round_trip(df, engine=None, path=None,
+ write_kwargs=None, read_kwargs=None,
+ expected=None, check_names=True,
+ repeat=2):
+ """Verify parquet serializer and deserializer produce the same results.
+
+ Performs a pandas to disk and disk to pandas round trip,
+ then compares the 2 resulting DataFrames to verify equality.
+
+ Parameters
+ ----------
+ df: Dataframe
+ engine: str, optional
+ 'pyarrow' or 'fastparquet'
+ path: str, optional
+ write_kwargs: dict of str:str, optional
+ read_kwargs: dict of str:str, optional
+ expected: DataFrame, optional
+ Expected deserialization result, otherwise will be equal to `df`
+ check_names: list of str, optional
+ Closed set of column names to be compared
+ repeat: int, optional
+ How many times to repeat the test
+ """
+
+ write_kwargs = write_kwargs or {'compression': None}
+ read_kwargs = read_kwargs or {}
+
+ if expected is None:
+ expected = df
+
+ if engine:
+ write_kwargs['engine'] = engine
+ read_kwargs['engine'] = engine
+
+ def compare(repeat):
+ for _ in range(repeat):
+ df.to_parquet(path, **write_kwargs)
+ actual = read_parquet(path, **read_kwargs)
+ tm.assert_frame_equal(expected, actual,
+ check_names=check_names)
+
+ if path is None:
+ with tm.ensure_clean() as path:
+ compare(repeat)
+ else:
+ compare(repeat)
+
+def test_invalid_engine(df_compat):
with pytest.raises(ValueError):
- df_compat.to_parquet('foo', 'bar')
+ check_round_trip(df_compat, 'foo', 'bar')
def test_options_py(df_compat, pa):
# use the set option
- df = df_compat
- with tm.ensure_clean() as path:
-
- with pd.option_context('io.parquet.engine', 'pyarrow'):
- df.to_parquet(path)
-
- result = read_parquet(path)
- tm.assert_frame_equal(result, df)
+ with pd.option_context('io.parquet.engine', 'pyarrow'):
+ check_round_trip(df_compat)
def test_options_fp(df_compat, fp):
# use the set option
- df = df_compat
- with tm.ensure_clean() as path:
-
- with pd.option_context('io.parquet.engine', 'fastparquet'):
- df.to_parquet(path, compression=None)
-
- result = read_parquet(path)
- tm.assert_frame_equal(result, df)
+ with pd.option_context('io.parquet.engine', 'fastparquet'):
+ check_round_trip(df_compat)
def test_options_auto(df_compat, fp, pa):
+ # use the set option
- df = df_compat
- with tm.ensure_clean() as path:
-
- with pd.option_context('io.parquet.engine', 'auto'):
- df.to_parquet(path)
-
- result = read_parquet(path)
- tm.assert_frame_equal(result, df)
+ with pd.option_context('io.parquet.engine', 'auto'):
+ check_round_trip(df_compat)
def test_options_get_engine(fp, pa):
@@ -228,53 +259,23 @@ def check_error_on_write(self, df, engine, exc):
with tm.ensure_clean() as path:
to_parquet(df, path, engine, compression=None)
- def check_round_trip(self, df, engine, expected=None, path=None,
- write_kwargs=None, read_kwargs=None,
- check_names=True):
-
- if write_kwargs is None:
- write_kwargs = {'compression': None}
-
- if read_kwargs is None:
- read_kwargs = {}
-
- if expected is None:
- expected = df
-
- if path is None:
- with tm.ensure_clean() as path:
- check_round_trip_equals(df, path, engine,
- write_kwargs=write_kwargs,
- read_kwargs=read_kwargs,
- expected=expected,
- check_names=check_names)
- else:
- check_round_trip_equals(df, path, engine,
- write_kwargs=write_kwargs,
- read_kwargs=read_kwargs,
- expected=expected,
- check_names=check_names)
-
class TestBasic(Base):
def test_error(self, engine):
-
for obj in [pd.Series([1, 2, 3]), 1, 'foo', pd.Timestamp('20130101'),
np.array([1, 2, 3])]:
self.check_error_on_write(obj, engine, ValueError)
def test_columns_dtypes(self, engine):
-
df = pd.DataFrame({'string': list('abc'),
'int': list(range(1, 4))})
# unicode
df.columns = [u'foo', u'bar']
- self.check_round_trip(df, engine)
+ check_round_trip(df, engine)
def test_columns_dtypes_invalid(self, engine):
-
df = pd.DataFrame({'string': list('abc'),
'int': list(range(1, 4))})
@@ -302,8 +303,7 @@ def test_compression(self, engine, compression):
pytest.importorskip('brotli')
df = pd.DataFrame({'A': [1, 2, 3]})
- self.check_round_trip(df, engine,
- write_kwargs={'compression': compression})
+ check_round_trip(df, engine, write_kwargs={'compression': compression})
def test_read_columns(self, engine):
# GH18154
@@ -311,8 +311,8 @@ def test_read_columns(self, engine):
'int': list(range(1, 4))})
expected = pd.DataFrame({'string': list('abc')})
- self.check_round_trip(df, engine, expected=expected,
- read_kwargs={'columns': ['string']})
+ check_round_trip(df, engine, expected=expected,
+ read_kwargs={'columns': ['string']})
def test_write_index(self, engine):
check_names = engine != 'fastparquet'
@@ -323,7 +323,7 @@ def test_write_index(self, engine):
pytest.skip("pyarrow is < 0.7.0")
df = pd.DataFrame({'A': [1, 2, 3]})
- self.check_round_trip(df, engine)
+ check_round_trip(df, engine)
indexes = [
[2, 3, 4],
@@ -334,12 +334,12 @@ def test_write_index(self, engine):
# non-default index
for index in indexes:
df.index = index
- self.check_round_trip(df, engine, check_names=check_names)
+ check_round_trip(df, engine, check_names=check_names)
# index with meta-data
df.index = [0, 1, 2]
df.index.name = 'foo'
- self.check_round_trip(df, engine)
+ check_round_trip(df, engine)
def test_write_multiindex(self, pa_ge_070):
# Not suppoprted in fastparquet as of 0.1.3 or older pyarrow version
@@ -348,7 +348,7 @@ def test_write_multiindex(self, pa_ge_070):
df = pd.DataFrame({'A': [1, 2, 3]})
index = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)])
df.index = index
- self.check_round_trip(df, engine)
+ check_round_trip(df, engine)
def test_write_column_multiindex(self, engine):
# column multi-index
@@ -357,7 +357,6 @@ def test_write_column_multiindex(self, engine):
self.check_error_on_write(df, engine, ValueError)
def test_multiindex_with_columns(self, pa_ge_070):
-
engine = pa_ge_070
dates = pd.date_range('01-Jan-2018', '01-Dec-2018', freq='MS')
df = pd.DataFrame(np.random.randn(2 * len(dates), 3),
@@ -368,14 +367,10 @@ def test_multiindex_with_columns(self, pa_ge_070):
index2 = index1.copy(names=None)
for index in [index1, index2]:
df.index = index
- with tm.ensure_clean() as path:
- df.to_parquet(path, engine)
- result = read_parquet(path, engine)
- expected = df
- tm.assert_frame_equal(result, expected)
- result = read_parquet(path, engine, columns=['A', 'B'])
- expected = df[['A', 'B']]
- tm.assert_frame_equal(result, expected)
+
+ check_round_trip(df, engine)
+ check_round_trip(df, engine, read_kwargs={'columns': ['A', 'B']},
+ expected=df[['A', 'B']])
class TestParquetPyArrow(Base):
@@ -391,7 +386,7 @@ def test_basic(self, pa, df_full):
tz='Europe/Brussels')
df['bool_with_none'] = [True, None, True]
- self.check_round_trip(df, pa)
+ check_round_trip(df, pa)
@pytest.mark.xfail(reason="pyarrow fails on this (ARROW-1883)")
def test_basic_subset_columns(self, pa, df_full):
@@ -402,8 +397,8 @@ def test_basic_subset_columns(self, pa, df_full):
df['datetime_tz'] = pd.date_range('20130101', periods=3,
tz='Europe/Brussels')
- self.check_round_trip(df, pa, expected=df[['string', 'int']],
- read_kwargs={'columns': ['string', 'int']})
+ check_round_trip(df, pa, expected=df[['string', 'int']],
+ read_kwargs={'columns': ['string', 'int']})
def test_duplicate_columns(self, pa):
# not currently able to handle duplicate columns
@@ -433,7 +428,7 @@ def test_categorical(self, pa_ge_070):
# de-serialized as object
expected = df.assign(a=df.a.astype(object))
- self.check_round_trip(df, pa, expected)
+ check_round_trip(df, pa, expected=expected)
def test_categorical_unsupported(self, pa_lt_070):
pa = pa_lt_070
@@ -444,20 +439,19 @@ def test_categorical_unsupported(self, pa_lt_070):
def test_s3_roundtrip(self, df_compat, s3_resource, pa):
# GH #19134
- self.check_round_trip(df_compat, pa,
- path='s3://pandas-test/pyarrow.parquet')
+ check_round_trip(df_compat, pa,
+ path='s3://pandas-test/pyarrow.parquet')
class TestParquetFastParquet(Base):
def test_basic(self, fp, df_full):
-
df = df_full
# additional supported types for fastparquet
df['timedelta'] = pd.timedelta_range('1 day', periods=3)
- self.check_round_trip(df, fp)
+ check_round_trip(df, fp)
@pytest.mark.skip(reason="not supported")
def test_duplicate_columns(self, fp):
@@ -470,7 +464,7 @@ def test_duplicate_columns(self, fp):
def test_bool_with_none(self, fp):
df = pd.DataFrame({'a': [True, None, False]})
expected = pd.DataFrame({'a': [1.0, np.nan, 0.0]}, dtype='float16')
- self.check_round_trip(df, fp, expected=expected)
+ check_round_trip(df, fp, expected=expected)
def test_unsupported(self, fp):
@@ -486,7 +480,7 @@ def test_categorical(self, fp):
if LooseVersion(fastparquet.__version__) < LooseVersion("0.1.3"):
pytest.skip("CategoricalDtype not supported for older fp")
df = pd.DataFrame({'a': pd.Categorical(list('abc'))})
- self.check_round_trip(df, fp)
+ check_round_trip(df, fp)
def test_datetime_tz(self, fp):
# doesn't preserve tz
@@ -495,7 +489,7 @@ def test_datetime_tz(self, fp):
# warns on the coercion
with catch_warnings(record=True):
- self.check_round_trip(df, fp, df.astype('datetime64[ns]'))
+ check_round_trip(df, fp, expected=df.astype('datetime64[ns]'))
def test_filter_row_groups(self, fp):
d = {'a': list(range(0, 3))}
@@ -508,5 +502,5 @@ def test_filter_row_groups(self, fp):
def test_s3_roundtrip(self, df_compat, s3_resource, fp):
# GH #19134
- self.check_round_trip(df_compat, fp,
- path='s3://pandas-test/fastparquet.parquet')
+ check_round_trip(df_compat, fp,
+ path='s3://pandas-test/fastparquet.parquet')
| Refactoring and unification of testing approach in `test_parquet.py` module.
Iteration upon work that was done for https://github.com/pandas-dev/pandas/pull/19135#issuecomment-358161165
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] tests added / passed
| https://api.github.com/repos/pandas-dev/pandas/pulls/19332 | 2018-01-21T19:39:38Z | 2018-01-23T11:36:51Z | 2018-01-23T11:36:51Z | 2018-05-16T05:07:38Z |
TST: Clean up json/test_compression.py | diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py
index a83ec53904b28..2cf4c435bdc12 100644
--- a/pandas/tests/io/json/test_compression.py
+++ b/pandas/tests/io/json/test_compression.py
@@ -1,38 +1,10 @@
import pytest
import pandas as pd
-from pandas import compat
import pandas.util.testing as tm
-import pandas.util._test_decorators as td
from pandas.util.testing import assert_frame_equal, assert_raises_regex
-COMPRESSION_TYPES = [None, 'bz2', 'gzip',
- pytest.param('xz', marks=td.skip_if_no_lzma)]
-
-
-def decompress_file(path, compression):
- if compression is None:
- f = open(path, 'rb')
- elif compression == 'gzip':
- import gzip
- f = gzip.GzipFile(path, 'rb')
- elif compression == 'bz2':
- import bz2
- f = bz2.BZ2File(path, 'rb')
- elif compression == 'xz':
- lzma = compat.import_lzma()
- f = lzma.open(path, 'rb')
- else:
- msg = 'Unrecognized compression type: {}'.format(compression)
- raise ValueError(msg)
-
- result = f.read().decode('utf8')
- f.close()
- return result
-
-
-@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_compression_roundtrip(compression):
df = pd.DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
@@ -43,8 +15,9 @@ def test_compression_roundtrip(compression):
assert_frame_equal(df, pd.read_json(path, compression=compression))
# explicitly ensure file was compressed.
- uncompressed_content = decompress_file(path, compression)
- assert_frame_equal(df, pd.read_json(uncompressed_content))
+ with tm.decompress_file(path, compression) as fh:
+ result = fh.read().decode('utf8')
+ assert_frame_equal(df, pd.read_json(result))
def test_compress_zip_value_error():
@@ -67,7 +40,6 @@ def test_read_zipped_json():
assert_frame_equal(uncompressed_df, compressed_df)
-@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_with_s3_url(compression):
boto3 = pytest.importorskip('boto3')
pytest.importorskip('s3fs')
@@ -88,7 +60,6 @@ def test_with_s3_url(compression):
assert_frame_equal(df, roundtripped_df)
-@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_lines_with_compression(compression):
with tm.ensure_clean() as path:
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
@@ -98,7 +69,6 @@ def test_lines_with_compression(compression):
assert_frame_equal(df, roundtripped_df)
-@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_chunksize_with_compression(compression):
with tm.ensure_clean() as path:
df = pd.read_json('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}')
| xref #19226
Use new ``decompress_file`` utility and compression fixture to clean up json compression tests | https://api.github.com/repos/pandas-dev/pandas/pulls/19331 | 2018-01-21T15:50:00Z | 2018-01-21T17:50:06Z | 2018-01-21T17:50:06Z | 2018-01-22T19:51:27Z |
BUG: DatetimeIndex(tz) & single column name, return empty df (GH19157) | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 4dde76dee46a5..3016b0490873b 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -431,6 +431,7 @@ Timezones
- :func:`Timestamp.replace` will now handle Daylight Savings transitions gracefully (:issue:`18319`)
- Bug in tz-aware :class:`DatetimeIndex` where addition/subtraction with a :class:`TimedeltaIndex` or array with ``dtype='timedelta64[ns]'`` was incorrect (:issue:`17558`)
- Bug in :func:`DatetimeIndex.insert` where inserting ``NaT`` into a timezone-aware index incorrectly raised (:issue:`16357`)
+- Bug in the :class:`DataFrame` constructor, where tz-aware Datetimeindex and a given column name will result in an empty ``DataFrame`` (:issue:`19157`)
Offsets
^^^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 847779b1747cf..7328cd336babf 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -512,7 +512,11 @@ def _get_axes(N, K, index=index, columns=columns):
return _arrays_to_mgr([values], columns, index, columns,
dtype=dtype)
elif is_datetimetz(values):
- return self._init_dict({0: values}, index, columns, dtype=dtype)
+ # GH19157
+ if columns is None:
+ columns = [0]
+ return _arrays_to_mgr([values], columns, index, columns,
+ dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index b7d3a60ecf6e4..8b57e96e6fa06 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2092,3 +2092,14 @@ def test_frame_timeseries_to_records(self):
result['index'].dtype == 'M8[ns]'
result = df.to_records(index=False)
+
+ def test_frame_timeseries_column(self):
+ # GH19157
+ dr = date_range(start='20130101T10:00:00', periods=3, freq='T',
+ tz='US/Eastern')
+ result = DataFrame(dr, columns=['timestamps'])
+ expected = DataFrame({'timestamps': [
+ Timestamp('20130101T10:00:00', tz='US/Eastern'),
+ Timestamp('20130101T10:01:00', tz='US/Eastern'),
+ Timestamp('20130101T10:02:00', tz='US/Eastern')]})
+ tm.assert_frame_equal(result, expected)
| - [x] closes #19157
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This issue is due to self._init_dict({0: values}, index, columns, dtype=dtype) will call for the filtering if columns passed (`data = {k: v for k, v in compat.iteritems(data) if k in columns}`), see function ``_init_dict`` of [frame.py](https://github.com/pandas-dev/pandas/blob/master/pandas/core/frame.py)
So `self._init_dict({0: values}, index, columns, dtype=dtype)` expects the column name of values as '0', but since we pass a column with a different name, upon filtering it will result in an empty DataFrame.
My solution assumes that the conversion of a series of DatetimeIndex with tz_info. Hence, we will initialize a DataFrame according to the given column name. If no column name specified, index '0' is chosen. I introduced an assertion to warn the users if multiple column names are passed.
Update 27-01-2018: The updated PR includes a test, and update on whatsnew entry. The revised solution uses ``_arrays_to_mgr`` instead, such that a default column name 0 is specified if ``columns`` not specified. | https://api.github.com/repos/pandas-dev/pandas/pulls/19330 | 2018-01-21T13:57:58Z | 2018-01-27T16:55:24Z | 2018-01-27T16:55:23Z | 2018-01-27T16:55:48Z |
Change IntervalIndex set-ops error code type | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 86fc47dee09fc..cf3c3089750f8 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -312,6 +312,7 @@ Other API Changes
- Addition or subtraction of ``NaT`` from :class:`TimedeltaIndex` will return ``TimedeltaIndex`` instead of ``DatetimeIndex`` (:issue:`19124`)
- :func:`DatetimeIndex.shift` and :func:`TimedeltaIndex.shift` will now raise ``NullFrequencyError`` (which subclasses ``ValueError``, which was raised in older versions) when the index object frequency is ``None`` (:issue:`19147`)
- Addition and subtraction of ``NaN`` from a :class:`Series` with ``dtype='timedelta64[ns]'`` will raise a ``TypeError` instead of treating the ``NaN`` as ``NaT`` (:issue:`19274`)
+- Set operations (union, difference...) on :class:`IntervalIndex` with incompatible index types will now raise a ``TypeError`` rather than a ``ValueError`` (:issue:`19329`)
.. _whatsnew_0230.deprecations:
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 58b1bdb3f55ea..68145ebaed7e7 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -1152,12 +1152,17 @@ def insert(self, loc, item):
new_right = self.right.insert(loc, right_insert)
return self._shallow_copy(new_left, new_right)
- def _as_like_interval_index(self, other, error_msg):
+ def _as_like_interval_index(self, other):
self._assert_can_do_setop(other)
other = _ensure_index(other)
- if (not isinstance(other, IntervalIndex) or
- self.closed != other.closed):
- raise ValueError(error_msg)
+ if not isinstance(other, IntervalIndex):
+ msg = ('the other index needs to be an IntervalIndex too, but '
+ 'was type {}').format(other.__class__.__name__)
+ raise TypeError(msg)
+ elif self.closed != other.closed:
+ msg = ('can only do set operations between two IntervalIndex '
+ 'objects that are closed on the same side')
+ raise ValueError(msg)
return other
def _concat_same_dtype(self, to_concat, name):
@@ -1296,9 +1301,7 @@ def equals(self, other):
def _setop(op_name):
def func(self, other):
- msg = ('can only do set operations between two IntervalIndex '
- 'objects that are closed on the same side')
- other = self._as_like_interval_index(other, msg)
+ other = self._as_like_interval_index(other)
# GH 19016: ensure set op will not return a prohibited dtype
subtypes = [self.dtype.subtype, other.dtype.subtype]
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index 9895ee06a22c0..345d3a9a0878b 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -934,12 +934,14 @@ def test_set_operation_errors(self, closed, op_name):
set_op = getattr(index, op_name)
# non-IntervalIndex
- msg = ('can only do set operations between two IntervalIndex objects '
- 'that are closed on the same side')
- with tm.assert_raises_regex(ValueError, msg):
+ msg = ('the other index needs to be an IntervalIndex too, but '
+ 'was type Int64Index')
+ with tm.assert_raises_regex(TypeError, msg):
set_op(Index([1, 2, 3]))
# mixed closed
+ msg = ('can only do set operations between two IntervalIndex objects '
+ 'that are closed on the same side')
for other_closed in {'right', 'left', 'both', 'neither'} - {closed}:
other = self.create_index(closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
| - [x] xref #19021
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Set operations (union, difference...) on ``IntervalIndex`` with incompatible index types will now raise a ``TypeError`` rather than a ``ValueError``.
This PR is needed to make the changes requested in #19021.
EDIT: I've improved the error message also. Previously, you'd get:
```python
>>> pd.IntervalIndex.from_breaks([0,1,2,3]).union(pd.RangeIndex(3))
ValueError: can only do set operations between two IntervalIndex objects that are closed on the same side
```
Which made no sense in this case. Now we get:
```python
>>> pd.IntervalIndex.from_breaks([0,1,2,3]).union(pd.RangeIndex(3))
TypeError: the other index needs to be an IntervalIndex too, but was type RangeIndex
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/19329 | 2018-01-21T06:00:52Z | 2018-01-21T15:50:03Z | 2018-01-21T15:50:03Z | 2018-01-22T07:04:50Z |
small cleanups aggregated | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 6d0a415f5b420..105fe9622a93f 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -5,7 +5,7 @@
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
- Timestamp, Timedelta, )
+ Timestamp)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
@@ -3979,7 +3979,7 @@ def _validate_for_numeric_binop(self, other, op, opstr):
internal method called by ops
"""
# if we are an inheritor of numeric,
- # but not actually numeric (e.g. DatetimeIndex/PeriodInde)
+ # but not actually numeric (e.g. DatetimeIndex/PeriodIndex)
if not self._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op {opstr} "
"for type: {typ}".format(
@@ -4006,7 +4006,7 @@ def _validate_for_numeric_binop(self, other, op, opstr):
raise TypeError("cannot evaluate a numeric op "
"with a non-numeric dtype")
elif isinstance(other, (ABCDateOffset, np.timedelta64,
- Timedelta, datetime.timedelta)):
+ datetime.timedelta)):
# higher up to handle
pass
elif isinstance(other, (Timestamp, np.datetime64)):
@@ -4031,13 +4031,13 @@ def _evaluate_numeric_binop(self, other):
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64,
- Timedelta, datetime.timedelta)):
+ datetime.timedelta)):
return self._evaluate_with_timedelta_like(other, op, opstr,
reversed)
elif isinstance(other, (Timestamp, np.datetime64)):
return self._evaluate_with_datetime_like(other, op, opstr)
- # if we are a reversed non-communative op
+ # if we are a reversed non-commutative op
values = self.values
if reversed:
values, other = other, values
@@ -4081,11 +4081,8 @@ def _evaluate_numeric_binop(self, other):
cls.__divmod__ = _make_evaluate_binop(
divmod,
'__divmod__',
- constructor=lambda result, **attrs: (
- Index(result[0], **attrs),
- Index(result[1], **attrs),
- ),
- )
+ constructor=lambda result, **attrs: (Index(result[0], **attrs),
+ Index(result[1], **attrs)))
@classmethod
def _add_numeric_methods_unary(cls):
@@ -4275,8 +4272,7 @@ def _ensure_index(index_like, copy=False):
def _get_na_value(dtype):
if is_datetime64_any_dtype(dtype) or is_timedelta64_dtype(dtype):
return libts.NaT
- return {np.datetime64: libts.NaT,
- np.timedelta64: libts.NaT}.get(dtype, np.nan)
+ return np.nan
def _ensure_has_len(seq):
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 10a923c056be2..bafccbf35dae3 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -595,7 +595,7 @@ def _evaluate_numeric_binop(self, other):
self, other = other, self
try:
- # alppy if we have an override
+ # apply if we have an override
if step:
with np.errstate(all='ignore'):
rstep = step(self._step, other)
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index fc04d9d291bf9..d6922182e47c7 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -150,22 +150,7 @@ def names(x):
return new_methods
-def add_methods(cls, new_methods, force, select, exclude):
- if select and exclude:
- raise TypeError("May only pass either select or exclude")
-
- if select:
- select = set(select)
- methods = {}
- for key, method in new_methods.items():
- if key in select:
- methods[key] = method
- new_methods = methods
-
- if exclude:
- for k in exclude:
- new_methods.pop(k, None)
-
+def add_methods(cls, new_methods, force):
for name, method in new_methods.items():
if force or name not in cls.__dict__:
bind_method(cls, name, method)
@@ -175,8 +160,8 @@ def add_methods(cls, new_methods, force, select, exclude):
# Arithmetic
def add_special_arithmetic_methods(cls, arith_method=None,
comp_method=None, bool_method=None,
- use_numexpr=True, force=False, select=None,
- exclude=None, have_divmod=False):
+ use_numexpr=True, force=False,
+ have_divmod=False):
"""
Adds the full suite of special arithmetic methods (``__add__``,
``__sub__``, etc.) to the class.
@@ -195,10 +180,6 @@ def add_special_arithmetic_methods(cls, arith_method=None,
force : bool, default False
if False, checks whether function is defined **on ``cls.__dict__``**
before defining if True, always defines functions on class base
- select : iterable of strings (optional)
- if passed, only sets functions with names in select
- exclude : iterable of strings (optional)
- if passed, will not set functions with names in exclude
have_divmod : bool, (optional)
should a divmod method be added? this method is special because it
returns a tuple of cls instead of a single element of type cls
@@ -247,14 +228,12 @@ def f(self, other):
__ior__=_wrap_inplace_method(new_methods["__or__"]),
__ixor__=_wrap_inplace_method(new_methods["__xor__"])))
- add_methods(cls, new_methods=new_methods, force=force, select=select,
- exclude=exclude)
+ add_methods(cls, new_methods=new_methods, force=force)
def add_flex_arithmetic_methods(cls, flex_arith_method,
flex_comp_method=None, flex_bool_method=None,
- use_numexpr=True, force=False, select=None,
- exclude=None):
+ use_numexpr=True, force=False):
"""
Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)
to the class.
@@ -271,10 +250,6 @@ def add_flex_arithmetic_methods(cls, flex_arith_method,
force : bool, default False
if False, checks whether function is defined **on ``cls.__dict__``**
before defining if True, always defines functions on class base
- select : iterable of strings (optional)
- if passed, only sets functions with names in select
- exclude : iterable of strings (optional)
- if passed, will not set functions with names in exclude
"""
# in frame, default axis is 'columns', doesn't matter for series and panel
new_methods = _create_methods(flex_arith_method,
@@ -289,8 +264,7 @@ def add_flex_arithmetic_methods(cls, flex_arith_method,
if k in new_methods:
new_methods.pop(k)
- add_methods(cls, new_methods=new_methods, force=force, select=select,
- exclude=exclude)
+ add_methods(cls, new_methods=new_methods, force=force)
def _align_method_SERIES(left, right, align_asobject=False):
@@ -389,16 +363,16 @@ def wrapper(left, right, name=name, na_op=na_op):
return NotImplemented
left, right = _align_method_SERIES(left, right)
+ res_name = _get_series_op_result_name(left, right)
+
if is_datetime64_dtype(left) or is_datetime64tz_dtype(left):
result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex)
- res_name = _get_series_op_result_name(left, right)
return construct_result(left, result,
index=left.index, name=res_name,
dtype=result.dtype)
elif is_timedelta64_dtype(left):
result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex)
- res_name = _get_series_op_result_name(left, right)
return construct_result(left, result,
index=left.index, name=res_name,
dtype=result.dtype)
@@ -409,7 +383,6 @@ def wrapper(left, right, name=name, na_op=na_op):
rvalues = getattr(rvalues, 'values', rvalues)
result = safe_na_op(lvalues, rvalues)
- res_name = _get_series_op_result_name(left, right)
return construct_result(left, result,
index=left.index, name=res_name, dtype=None)
| A few cleanups that I've found myself making repeatedly in WIP branches, figured it was worth pushing them to avoid cluttering diffs down the road.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19328 | 2018-01-21T00:58:00Z | 2018-01-21T15:10:46Z | 2018-01-21T15:10:46Z | 2018-01-23T04:40:02Z |
DOC: Improve docs (GH19312) for Series.nonzero() | diff --git a/pandas/core/series.py b/pandas/core/series.py
index be40f65186d2d..fc512d23a05ba 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -490,7 +490,7 @@ def compress(self, condition, *args, **kwargs):
def nonzero(self):
"""
- Return the indices of the elements that are non-zero
+ Return the *integer* indices of the elements that are non-zero
This method is equivalent to calling `numpy.nonzero` on the
series data. For compatibility with NumPy, the return value is
@@ -508,6 +508,15 @@ def nonzero(self):
3 4
dtype: int64
+ >>> s = pd.Series([0, 3, 0, 4], index=['a', 'b', 'c', 'd'])
+ # same return although index of s is different
+ >>> s.nonzero()
+ (array([1, 3]),)
+ >>> s.iloc[s.nonzero()[0]]
+ b 3
+ d 4
+ dtype: int64
+
See Also
--------
numpy.nonzero
| - [x] closes #19312
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19324 | 2018-01-20T15:19:06Z | 2018-01-23T00:10:50Z | 2018-01-23T00:10:50Z | 2018-01-23T14:00:21Z |
Separate test_numeric_compat into method-specific tests | diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index 1ce8ade50c071..3de1c4c982654 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -29,20 +29,21 @@ def full_like(array, value):
class Numeric(Base):
def test_numeric_compat(self):
+ pass # override Base method
+ def test_mul_int(self):
idx = self.create_index()
- didx = idx * idx
-
result = idx * 1
tm.assert_index_equal(result, idx)
+ def test_rmul_int(self):
+ idx = self.create_index()
+
result = 1 * idx
tm.assert_index_equal(result, idx)
- # in general not true for RangeIndex
- if not isinstance(idx, RangeIndex):
- result = idx * idx
- tm.assert_index_equal(result, idx ** 2)
+ def test_div_int(self):
+ idx = self.create_index()
# truediv under PY3
result = idx / 1
@@ -57,9 +58,16 @@ def test_numeric_compat(self):
expected = Index(idx.values / 2)
tm.assert_index_equal(result, expected)
+ def test_floordiv_int(self):
+ idx = self.create_index()
+
result = idx // 1
tm.assert_index_equal(result, idx)
+ def test_mul_int_array(self):
+ idx = self.create_index()
+ didx = idx * idx
+
result = idx * np.array(5, dtype='int64')
tm.assert_index_equal(result, idx * 5)
@@ -67,19 +75,45 @@ def test_numeric_compat(self):
result = idx * np.arange(5, dtype=arr_dtype)
tm.assert_index_equal(result, didx)
+ def test_mul_int_series(self):
+ idx = self.create_index()
+ didx = idx * idx
+
+ arr_dtype = 'uint64' if isinstance(idx, UInt64Index) else 'int64'
result = idx * Series(np.arange(5, dtype=arr_dtype))
tm.assert_series_equal(result, Series(didx))
+ def test_mul_float_series(self):
+ idx = self.create_index()
rng5 = np.arange(5, dtype='float64')
+
result = idx * Series(rng5 + 0.1)
expected = Series(rng5 * (rng5 + 0.1))
tm.assert_series_equal(result, expected)
- # invalid
- pytest.raises(TypeError,
- lambda: idx * date_range('20130101', periods=5))
- pytest.raises(ValueError, lambda: idx * idx[0:3])
- pytest.raises(ValueError, lambda: idx * np.array([1, 2]))
+ def test_mul_index(self):
+ idx = self.create_index()
+
+ # in general not true for RangeIndex
+ if not isinstance(idx, RangeIndex):
+ result = idx * idx
+ tm.assert_index_equal(result, idx ** 2)
+
+ def test_mul_datelike_raises(self):
+ idx = self.create_index()
+ with pytest.raises(TypeError):
+ idx * date_range('20130101', periods=5)
+
+ def test_mul_size_mismatch_raises(self):
+ idx = self.create_index()
+
+ with pytest.raises(ValueError):
+ idx * idx[0:3]
+ with pytest.raises(ValueError):
+ idx * np.array([1, 2])
+
+ def test_divmod(self):
+ idx = self.create_index()
result = divmod(idx, 2)
with np.errstate(all='ignore'):
@@ -95,15 +129,22 @@ def test_numeric_compat(self):
for r, e in zip(result, expected):
tm.assert_index_equal(r, e)
+ def test_pow_float(self):
# test power calculations both ways, GH 14973
- expected = pd.Float64Index(2.0**idx.values)
- result = 2.0**idx
- tm.assert_index_equal(result, expected)
+ idx = self.create_index()
expected = pd.Float64Index(idx.values**2.0)
result = idx**2.0
tm.assert_index_equal(result, expected)
+ def test_rpow_float(self):
+ # test power calculations both ways, GH 14973
+ idx = self.create_index()
+
+ expected = pd.Float64Index(2.0**idx.values)
+ result = 2.0**idx
+ tm.assert_index_equal(result, expected)
+
@pytest.mark.xfail(reason='GH#19252 Series has no __rdivmod__')
def test_divmod_series(self):
idx = self.create_index()
| A more reasonably-scoped attempt at #19255.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19321 | 2018-01-20T01:07:20Z | 2018-01-21T15:15:24Z | 2018-01-21T15:15:24Z | 2018-01-21T18:36:35Z |
Centralize and de-duplicate comparison and arith tests | diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
new file mode 100644
index 0000000000000..3f4e3877a276a
--- /dev/null
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+
+import numpy as np
+
+import pandas as pd
+import pandas.util.testing as tm
+
+
+class TestPeriodFrameArithmetic(object):
+
+ def test_ops_frame_period(self):
+ # GH 13043
+ df = pd.DataFrame({'A': [pd.Period('2015-01', freq='M'),
+ pd.Period('2015-02', freq='M')],
+ 'B': [pd.Period('2014-01', freq='M'),
+ pd.Period('2014-02', freq='M')]})
+ assert df['A'].dtype == object
+ assert df['B'].dtype == object
+
+ p = pd.Period('2015-03', freq='M')
+ # dtype will be object because of original dtype
+ exp = pd.DataFrame({'A': np.array([2, 1], dtype=object),
+ 'B': np.array([14, 13], dtype=object)})
+ tm.assert_frame_equal(p - df, exp)
+ tm.assert_frame_equal(df - p, -exp)
+
+ df2 = pd.DataFrame({'A': [pd.Period('2015-05', freq='M'),
+ pd.Period('2015-06', freq='M')],
+ 'B': [pd.Period('2015-05', freq='M'),
+ pd.Period('2015-06', freq='M')]})
+ assert df2['A'].dtype == object
+ assert df2['B'].dtype == object
+
+ exp = pd.DataFrame({'A': np.array([4, 4], dtype=object),
+ 'B': np.array([16, 16], dtype=object)})
+ tm.assert_frame_equal(df2 - df, exp)
+ tm.assert_frame_equal(df - df2, -exp)
diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py
index 011b33a4d6f35..480f025db17ca 100644
--- a/pandas/tests/indexes/datetimes/test_arithmetic.py
+++ b/pandas/tests/indexes/datetimes/test_arithmetic.py
@@ -1,12 +1,14 @@
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
+import operator
import pytest
import numpy as np
import pandas as pd
+from pandas.compat.numpy import np_datetime64_compat
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
@@ -41,6 +43,187 @@ def addend(request):
return request.param
+class TestDatetimeIndexComparisons(object):
+ # TODO: De-duplicate with test_comparisons_nat below
+ def test_dti_cmp_nat(self):
+ left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
+ pd.Timestamp('2011-01-03')])
+ right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
+
+ for lhs, rhs in [(left, right),
+ (left.astype(object), right.astype(object))]:
+ result = rhs == lhs
+ expected = np.array([False, False, True])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = lhs != rhs
+ expected = np.array([True, True, False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ expected = np.array([False, False, False])
+ tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
+ tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
+
+ expected = np.array([True, True, True])
+ tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
+ tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
+
+ expected = np.array([False, False, False])
+ tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
+ tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
+
+ @pytest.mark.parametrize('op', [operator.eq, operator.ne,
+ operator.gt, operator.ge,
+ operator.lt, operator.le])
+ def test_comparison_tzawareness_compat(self, op):
+ # GH#18162
+ dr = pd.date_range('2016-01-01', periods=6)
+ dz = dr.tz_localize('US/Pacific')
+
+ with pytest.raises(TypeError):
+ op(dr, dz)
+ with pytest.raises(TypeError):
+ op(dr, list(dz))
+ with pytest.raises(TypeError):
+ op(dz, dr)
+ with pytest.raises(TypeError):
+ op(dz, list(dr))
+
+ # Check that there isn't a problem aware-aware and naive-naive do not
+ # raise
+ assert (dr == dr).all()
+ assert (dr == list(dr)).all()
+ assert (dz == dz).all()
+ assert (dz == list(dz)).all()
+
+ # Check comparisons against scalar Timestamps
+ ts = pd.Timestamp('2000-03-14 01:59')
+ ts_tz = pd.Timestamp('2000-03-14 01:59', tz='Europe/Amsterdam')
+
+ assert (dr > ts).all()
+ with pytest.raises(TypeError):
+ op(dr, ts_tz)
+
+ assert (dz > ts_tz).all()
+ with pytest.raises(TypeError):
+ op(dz, ts)
+
+ @pytest.mark.parametrize('op', [operator.eq, operator.ne,
+ operator.gt, operator.ge,
+ operator.lt, operator.le])
+ def test_nat_comparison_tzawareness(self, op):
+ # GH#19276
+ # tzaware DatetimeIndex should not raise when compared to NaT
+ dti = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
+ '2014-05-01', '2014-07-01'])
+ expected = np.array([op == operator.ne] * len(dti))
+ result = op(dti, pd.NaT)
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = op(dti.tz_localize('US/Pacific'), pd.NaT)
+ tm.assert_numpy_array_equal(result, expected)
+
+ def test_comparisons_coverage(self):
+ rng = date_range('1/1/2000', periods=10)
+
+ # raise TypeError for now
+ pytest.raises(TypeError, rng.__lt__, rng[3].value)
+
+ result = rng == list(rng)
+ exp = rng == rng
+ tm.assert_numpy_array_equal(result, exp)
+
+ def test_comparisons_nat(self):
+
+ fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
+ fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
+
+ didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
+ '2014-05-01', '2014-07-01'])
+ didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT,
+ '2014-06-01', '2014-07-01'])
+ darr = np.array([np_datetime64_compat('2014-02-01 00:00Z'),
+ np_datetime64_compat('2014-03-01 00:00Z'),
+ np_datetime64_compat('nat'), np.datetime64('nat'),
+ np_datetime64_compat('2014-06-01 00:00Z'),
+ np_datetime64_compat('2014-07-01 00:00Z')])
+
+ cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
+
+ # Check pd.NaT is handles as the same as np.nan
+ with tm.assert_produces_warning(None):
+ for idx1, idx2 in cases:
+
+ result = idx1 < idx2
+ expected = np.array([True, False, False, False, True, False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = idx2 > idx1
+ expected = np.array([True, False, False, False, True, False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = idx1 <= idx2
+ expected = np.array([True, False, False, False, True, True])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = idx2 >= idx1
+ expected = np.array([True, False, False, False, True, True])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = idx1 == idx2
+ expected = np.array([False, False, False, False, False, True])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = idx1 != idx2
+ expected = np.array([True, True, True, True, True, False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ with tm.assert_produces_warning(None):
+ for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
+ result = idx1 < val
+ expected = np.array([False, False, False, False, False, False])
+ tm.assert_numpy_array_equal(result, expected)
+ result = idx1 > val
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = idx1 <= val
+ tm.assert_numpy_array_equal(result, expected)
+ result = idx1 >= val
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = idx1 == val
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = idx1 != val
+ expected = np.array([True, True, True, True, True, True])
+ tm.assert_numpy_array_equal(result, expected)
+
+ # Check pd.NaT is handles as the same as np.nan
+ with tm.assert_produces_warning(None):
+ for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
+ result = idx1 < val
+ expected = np.array([True, False, False, False, False, False])
+ tm.assert_numpy_array_equal(result, expected)
+ result = idx1 > val
+ expected = np.array([False, False, False, False, True, True])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = idx1 <= val
+ expected = np.array([True, False, True, False, False, False])
+ tm.assert_numpy_array_equal(result, expected)
+ result = idx1 >= val
+ expected = np.array([False, False, True, False, True, True])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = idx1 == val
+ expected = np.array([False, False, True, False, False, False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = idx1 != val
+ expected = np.array([True, True, False, True, True, True])
+ tm.assert_numpy_array_equal(result, expected)
+
+
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index e3ebb8769db02..49f94bfa65543 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -1,4 +1,3 @@
-import operator
import pytest
@@ -9,7 +8,6 @@
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import lrange
-from pandas.compat.numpy import np_datetime64_compat
from pandas import (DatetimeIndex, Index, date_range, DataFrame,
Timestamp, offsets)
@@ -250,157 +248,6 @@ def test_append_join_nondatetimeindex(self):
# it works
rng.join(idx, how='outer')
- @pytest.mark.parametrize('op', [operator.eq, operator.ne,
- operator.gt, operator.ge,
- operator.lt, operator.le])
- def test_comparison_tzawareness_compat(self, op):
- # GH#18162
- dr = pd.date_range('2016-01-01', periods=6)
- dz = dr.tz_localize('US/Pacific')
-
- with pytest.raises(TypeError):
- op(dr, dz)
- with pytest.raises(TypeError):
- op(dr, list(dz))
- with pytest.raises(TypeError):
- op(dz, dr)
- with pytest.raises(TypeError):
- op(dz, list(dr))
-
- # Check that there isn't a problem aware-aware and naive-naive do not
- # raise
- assert (dr == dr).all()
- assert (dr == list(dr)).all()
- assert (dz == dz).all()
- assert (dz == list(dz)).all()
-
- # Check comparisons against scalar Timestamps
- ts = pd.Timestamp('2000-03-14 01:59')
- ts_tz = pd.Timestamp('2000-03-14 01:59', tz='Europe/Amsterdam')
-
- assert (dr > ts).all()
- with pytest.raises(TypeError):
- op(dr, ts_tz)
-
- assert (dz > ts_tz).all()
- with pytest.raises(TypeError):
- op(dz, ts)
-
- @pytest.mark.parametrize('op', [operator.eq, operator.ne,
- operator.gt, operator.ge,
- operator.lt, operator.le])
- def test_nat_comparison_tzawareness(self, op):
- # GH#19276
- # tzaware DatetimeIndex should not raise when compared to NaT
- dti = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
- '2014-05-01', '2014-07-01'])
- expected = np.array([op == operator.ne] * len(dti))
- result = op(dti, pd.NaT)
- tm.assert_numpy_array_equal(result, expected)
-
- result = op(dti.tz_localize('US/Pacific'), pd.NaT)
- tm.assert_numpy_array_equal(result, expected)
-
- def test_comparisons_coverage(self):
- rng = date_range('1/1/2000', periods=10)
-
- # raise TypeError for now
- pytest.raises(TypeError, rng.__lt__, rng[3].value)
-
- result = rng == list(rng)
- exp = rng == rng
- tm.assert_numpy_array_equal(result, exp)
-
- def test_comparisons_nat(self):
-
- fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
- fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
-
- didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
- '2014-05-01', '2014-07-01'])
- didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT,
- '2014-06-01', '2014-07-01'])
- darr = np.array([np_datetime64_compat('2014-02-01 00:00Z'),
- np_datetime64_compat('2014-03-01 00:00Z'),
- np_datetime64_compat('nat'), np.datetime64('nat'),
- np_datetime64_compat('2014-06-01 00:00Z'),
- np_datetime64_compat('2014-07-01 00:00Z')])
-
- cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
-
- # Check pd.NaT is handles as the same as np.nan
- with tm.assert_produces_warning(None):
- for idx1, idx2 in cases:
-
- result = idx1 < idx2
- expected = np.array([True, False, False, False, True, False])
- tm.assert_numpy_array_equal(result, expected)
-
- result = idx2 > idx1
- expected = np.array([True, False, False, False, True, False])
- tm.assert_numpy_array_equal(result, expected)
-
- result = idx1 <= idx2
- expected = np.array([True, False, False, False, True, True])
- tm.assert_numpy_array_equal(result, expected)
-
- result = idx2 >= idx1
- expected = np.array([True, False, False, False, True, True])
- tm.assert_numpy_array_equal(result, expected)
-
- result = idx1 == idx2
- expected = np.array([False, False, False, False, False, True])
- tm.assert_numpy_array_equal(result, expected)
-
- result = idx1 != idx2
- expected = np.array([True, True, True, True, True, False])
- tm.assert_numpy_array_equal(result, expected)
-
- with tm.assert_produces_warning(None):
- for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
- result = idx1 < val
- expected = np.array([False, False, False, False, False, False])
- tm.assert_numpy_array_equal(result, expected)
- result = idx1 > val
- tm.assert_numpy_array_equal(result, expected)
-
- result = idx1 <= val
- tm.assert_numpy_array_equal(result, expected)
- result = idx1 >= val
- tm.assert_numpy_array_equal(result, expected)
-
- result = idx1 == val
- tm.assert_numpy_array_equal(result, expected)
-
- result = idx1 != val
- expected = np.array([True, True, True, True, True, True])
- tm.assert_numpy_array_equal(result, expected)
-
- # Check pd.NaT is handles as the same as np.nan
- with tm.assert_produces_warning(None):
- for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
- result = idx1 < val
- expected = np.array([True, False, False, False, False, False])
- tm.assert_numpy_array_equal(result, expected)
- result = idx1 > val
- expected = np.array([False, False, False, False, True, True])
- tm.assert_numpy_array_equal(result, expected)
-
- result = idx1 <= val
- expected = np.array([True, False, True, False, False, False])
- tm.assert_numpy_array_equal(result, expected)
- result = idx1 >= val
- expected = np.array([False, False, True, False, True, True])
- tm.assert_numpy_array_equal(result, expected)
-
- result = idx1 == val
- expected = np.array([False, False, True, False, False, False])
- tm.assert_numpy_array_equal(result, expected)
-
- result = idx1 != val
- expected = np.array([True, True, False, True, True, True])
- tm.assert_numpy_array_equal(result, expected)
-
def test_map(self):
rng = date_range('1/1/2000', periods=10)
diff --git a/pandas/tests/indexes/datetimes/test_datetimelike.py b/pandas/tests/indexes/datetimes/test_datetimelike.py
index 538e10e6011ec..9d6d27ecb4b6f 100644
--- a/pandas/tests/indexes/datetimes/test_datetimelike.py
+++ b/pandas/tests/indexes/datetimes/test_datetimelike.py
@@ -21,28 +21,7 @@ def create_index(self):
return date_range('20130101', periods=5)
def test_shift(self):
-
- # test shift for datetimeIndex and non datetimeIndex
- # GH8083
-
- drange = self.create_index()
- result = drange.shift(1)
- expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
- '2013-01-05',
- '2013-01-06'], freq='D')
- tm.assert_index_equal(result, expected)
-
- result = drange.shift(-1)
- expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
- '2013-01-03', '2013-01-04'],
- freq='D')
- tm.assert_index_equal(result, expected)
-
- result = drange.shift(3, freq='2D')
- expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
- '2013-01-10',
- '2013-01-11'], freq='D')
- tm.assert_index_equal(result, expected)
+ pass # handled in test_ops
def test_pickle_compat_construction(self):
pass
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index a2a84adbf46c1..a91dbd905e12c 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -385,33 +385,6 @@ def test_resolution(self):
tz=tz)
assert idx.resolution == expected
- def test_comp_nat(self):
- left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
- pd.Timestamp('2011-01-03')])
- right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
-
- for lhs, rhs in [(left, right),
- (left.astype(object), right.astype(object))]:
- result = rhs == lhs
- expected = np.array([False, False, True])
- tm.assert_numpy_array_equal(result, expected)
-
- result = lhs != rhs
- expected = np.array([True, True, False])
- tm.assert_numpy_array_equal(result, expected)
-
- expected = np.array([False, False, False])
- tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
- tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
-
- expected = np.array([True, True, True])
- tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
- tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
-
- expected = np.array([False, False, False])
- tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
- tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
-
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
@@ -617,6 +590,29 @@ def test_shift(self):
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
+ # TODO: moved from test_datetimelike; de-duplicate with test_shift above
+ def test_shift2(self):
+ # test shift for datetimeIndex and non datetimeIndex
+ # GH8083
+ drange = pd.date_range('20130101', periods=5)
+ result = drange.shift(1)
+ expected = pd.DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
+ '2013-01-05',
+ '2013-01-06'], freq='D')
+ tm.assert_index_equal(result, expected)
+
+ result = drange.shift(-1)
+ expected = pd.DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
+ '2013-01-03', '2013-01-04'],
+ freq='D')
+ tm.assert_index_equal(result, expected)
+
+ result = drange.shift(3, freq='2D')
+ expected = pd.DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
+ '2013-01-10',
+ '2013-01-11'], freq='D')
+ tm.assert_index_equal(result, expected)
+
def test_nat(self):
assert pd.DatetimeIndex._na_value is pd.NaT
assert pd.DatetimeIndex([])._na_value is pd.NaT
diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py
index a78bc6fc577b8..21a9ffdde3444 100644
--- a/pandas/tests/indexes/period/test_ops.py
+++ b/pandas/tests/indexes/period/test_ops.py
@@ -6,7 +6,7 @@
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
import pandas.core.indexes.period as period
-from pandas import (DatetimeIndex, PeriodIndex, period_range, Series, Period,
+from pandas import (DatetimeIndex, PeriodIndex, Series, Period,
_np_version_under1p10, Index)
from pandas.tests.test_base import Ops
@@ -285,33 +285,6 @@ def test_resolution(self):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
assert idx.resolution == expected
- def test_comp_nat(self):
- left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT,
- pd.Period('2011-01-03')])
- right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
-
- for lhs, rhs in [(left, right),
- (left.astype(object), right.astype(object))]:
- result = lhs == rhs
- expected = np.array([False, False, True])
- tm.assert_numpy_array_equal(result, expected)
-
- result = lhs != rhs
- expected = np.array([True, True, False])
- tm.assert_numpy_array_equal(result, expected)
-
- expected = np.array([False, False, False])
- tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
- tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
-
- expected = np.array([True, True, True])
- tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
- tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
-
- expected = np.array([False, False, False])
- tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
- tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
-
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
@@ -732,77 +705,6 @@ def test_pi_comp_period_nat(self):
self._check(idx, f, exp)
-class TestSeriesPeriod(object):
-
- def setup_method(self, method):
- self.series = Series(period_range('2000-01-01', periods=10, freq='D'))
-
- def test_ops_series_timedelta(self):
- # GH 13043
- s = pd.Series([pd.Period('2015-01-01', freq='D'),
- pd.Period('2015-01-02', freq='D')], name='xxx')
- assert s.dtype == object
-
- exp = pd.Series([pd.Period('2015-01-02', freq='D'),
- pd.Period('2015-01-03', freq='D')], name='xxx')
- tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
- tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
-
- tm.assert_series_equal(s + pd.tseries.offsets.Day(), exp)
- tm.assert_series_equal(pd.tseries.offsets.Day() + s, exp)
-
- def test_ops_series_period(self):
- # GH 13043
- s = pd.Series([pd.Period('2015-01-01', freq='D'),
- pd.Period('2015-01-02', freq='D')], name='xxx')
- assert s.dtype == object
-
- p = pd.Period('2015-01-10', freq='D')
- # dtype will be object because of original dtype
- exp = pd.Series([9, 8], name='xxx', dtype=object)
- tm.assert_series_equal(p - s, exp)
- tm.assert_series_equal(s - p, -exp)
-
- s2 = pd.Series([pd.Period('2015-01-05', freq='D'),
- pd.Period('2015-01-04', freq='D')], name='xxx')
- assert s2.dtype == object
-
- exp = pd.Series([4, 2], name='xxx', dtype=object)
- tm.assert_series_equal(s2 - s, exp)
- tm.assert_series_equal(s - s2, -exp)
-
-
-class TestFramePeriod(object):
-
- def test_ops_frame_period(self):
- # GH 13043
- df = pd.DataFrame({'A': [pd.Period('2015-01', freq='M'),
- pd.Period('2015-02', freq='M')],
- 'B': [pd.Period('2014-01', freq='M'),
- pd.Period('2014-02', freq='M')]})
- assert df['A'].dtype == object
- assert df['B'].dtype == object
-
- p = pd.Period('2015-03', freq='M')
- # dtype will be object because of original dtype
- exp = pd.DataFrame({'A': np.array([2, 1], dtype=object),
- 'B': np.array([14, 13], dtype=object)})
- tm.assert_frame_equal(p - df, exp)
- tm.assert_frame_equal(df - p, -exp)
-
- df2 = pd.DataFrame({'A': [pd.Period('2015-05', freq='M'),
- pd.Period('2015-06', freq='M')],
- 'B': [pd.Period('2015-05', freq='M'),
- pd.Period('2015-06', freq='M')]})
- assert df2['A'].dtype == object
- assert df2['B'].dtype == object
-
- exp = pd.DataFrame({'A': np.array([4, 4], dtype=object),
- 'B': np.array([16, 16], dtype=object)})
- tm.assert_frame_equal(df2 - df, exp)
- tm.assert_frame_equal(df - df2, -exp)
-
-
class TestPeriodIndexComparisons(object):
def test_pi_pi_comp(self):
@@ -942,3 +844,31 @@ def test_pi_nat_comp(self):
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
idx1 == diff
+
+ # TODO: De-duplicate with test_pi_nat_comp
+ def test_comp_nat(self):
+ left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT,
+ pd.Period('2011-01-03')])
+ right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
+
+ for lhs, rhs in [(left, right),
+ (left.astype(object), right.astype(object))]:
+ result = lhs == rhs
+ expected = np.array([False, False, True])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = lhs != rhs
+ expected = np.array([True, True, False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ expected = np.array([False, False, False])
+ tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
+ tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
+
+ expected = np.array([True, True, True])
+ tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
+ tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
+
+ expected = np.array([False, False, False])
+ tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
+ tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py
index 44f48f3ea9833..1a6aabc2f258f 100644
--- a/pandas/tests/indexes/timedeltas/test_arithmetic.py
+++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py
@@ -26,9 +26,120 @@ def freq(request):
return request.param
+class TestTimedeltaIndexComparisons(object):
+ def test_tdi_cmp_str_invalid(self):
+ # GH 13624
+ tdi = TimedeltaIndex(['1 day', '2 days'])
+
+ for left, right in [(tdi, 'a'), ('a', tdi)]:
+ with pytest.raises(TypeError):
+ left > right
+
+ with pytest.raises(TypeError):
+ left == right
+
+ with pytest.raises(TypeError):
+ left != right
+
+ def test_comparisons_coverage(self):
+ rng = timedelta_range('1 days', periods=10)
+
+ result = rng < rng[3]
+ exp = np.array([True, True, True] + [False] * 7)
+ tm.assert_numpy_array_equal(result, exp)
+
+ # raise TypeError for now
+ pytest.raises(TypeError, rng.__lt__, rng[3].value)
+
+ result = rng == list(rng)
+ exp = rng == rng
+ tm.assert_numpy_array_equal(result, exp)
+
+ def test_comp_nat(self):
+ left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
+ pd.Timedelta('3 days')])
+ right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
+
+ for lhs, rhs in [(left, right),
+ (left.astype(object), right.astype(object))]:
+ result = rhs == lhs
+ expected = np.array([False, False, True])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = rhs != lhs
+ expected = np.array([True, True, False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ expected = np.array([False, False, False])
+ tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
+ tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
+
+ expected = np.array([True, True, True])
+ tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
+ tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
+
+ expected = np.array([False, False, False])
+ tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
+ tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
+
+ def test_comparisons_nat(self):
+ tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
+ '1 day 00:00:01', '5 day 00:00:03'])
+ tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
+ '1 day 00:00:02', '5 days 00:00:03'])
+ tdarr = np.array([np.timedelta64(2, 'D'),
+ np.timedelta64(2, 'D'), np.timedelta64('nat'),
+ np.timedelta64('nat'),
+ np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
+ np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
+
+ cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
+
+ # Check pd.NaT is handles as the same as np.nan
+ for idx1, idx2 in cases:
+
+ result = idx1 < idx2
+ expected = np.array([True, False, False, False, True, False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = idx2 > idx1
+ expected = np.array([True, False, False, False, True, False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = idx1 <= idx2
+ expected = np.array([True, False, False, False, True, True])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = idx2 >= idx1
+ expected = np.array([True, False, False, False, True, True])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = idx1 == idx2
+ expected = np.array([False, False, False, False, False, True])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = idx1 != idx2
+ expected = np.array([True, True, True, True, True, False])
+ tm.assert_numpy_array_equal(result, expected)
+
+
class TestTimedeltaIndexArithmetic(object):
_holder = TimedeltaIndex
+ # -------------------------------------------------------------
+ # Invalid Operations
+
+ def test_tdi_add_str_invalid(self):
+ # GH 13624
+ tdi = TimedeltaIndex(['1 day', '2 days'])
+
+ with pytest.raises(TypeError):
+ tdi + 'a'
+ with pytest.raises(TypeError):
+ 'a' + tdi
+
+ # -------------------------------------------------------------
+
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_add_offset_array(self, box):
# GH#18849
@@ -128,41 +239,68 @@ def test_tdi_with_offset_series(self, names):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
- # TODO: Split by ops, better name
- def test_numeric_compat(self):
+ def test_mul_int(self):
idx = self._holder(np.arange(5, dtype='int64'))
- didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
+ def test_rmul_int(self):
+ idx = self._holder(np.arange(5, dtype='int64'))
result = 1 * idx
tm.assert_index_equal(result, idx)
+ def test_div_int(self):
+ idx = self._holder(np.arange(5, dtype='int64'))
result = idx / 1
tm.assert_index_equal(result, idx)
+ def test_floordiv_int(self):
+ idx = self._holder(np.arange(5, dtype='int64'))
result = idx // 1
tm.assert_index_equal(result, idx)
+ def test_mul_int_array_zerodim(self):
+ rng5 = np.arange(5, dtype='int64')
+ idx = self._holder(rng5)
+ expected = self._holder(rng5 * 5)
result = idx * np.array(5, dtype='int64')
- tm.assert_index_equal(result,
- self._holder(np.arange(5, dtype='int64') * 5))
+ tm.assert_index_equal(result, expected)
+
+ def test_mul_int_array(self):
+ rng5 = np.arange(5, dtype='int64')
+ idx = self._holder(rng5)
+ didx = self._holder(rng5 ** 2)
- result = idx * np.arange(5, dtype='int64')
+ result = idx * rng5
tm.assert_index_equal(result, didx)
+ def test_mul_int_series(self):
+ idx = self._holder(np.arange(5, dtype='int64'))
+ didx = self._holder(np.arange(5, dtype='int64') ** 2)
+
result = idx * Series(np.arange(5, dtype='int64'))
+
tm.assert_series_equal(result, Series(didx))
- rng5 = np.arange(5, dtype='float64')
- result = idx * Series(rng5 + 0.1)
- tm.assert_series_equal(result,
- Series(self._holder(rng5 * (rng5 + 0.1))))
+ def test_mul_float_series(self):
+ idx = self._holder(np.arange(5, dtype='int64'))
+
+ rng5f = np.arange(5, dtype='float64')
+ result = idx * Series(rng5f + 0.1)
+ expected = Series(self._holder(rng5f * (rng5f + 0.1)))
+ tm.assert_series_equal(result, expected)
+
+ def test_dti_mul_dti_raises(self):
+ idx = self._holder(np.arange(5, dtype='int64'))
+ with pytest.raises(TypeError):
+ idx * idx
- # invalid
- pytest.raises(TypeError, lambda: idx * idx)
- pytest.raises(ValueError, lambda: idx * self._holder(np.arange(3)))
- pytest.raises(ValueError, lambda: idx * np.array([1, 2]))
+ def test_dti_mul_too_short_raises(self):
+ idx = self._holder(np.arange(5, dtype='int64'))
+ with pytest.raises(ValueError):
+ idx * self._holder(np.arange(3))
+ with pytest.raises(ValueError):
+ idx * np.array([1, 2])
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index 081e299caa876..112c62b7e2f8d 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -212,33 +212,6 @@ def test_summary(self):
result = idx.summary()
assert result == expected
- def test_comp_nat(self):
- left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
- pd.Timedelta('3 days')])
- right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
-
- for lhs, rhs in [(left, right),
- (left.astype(object), right.astype(object))]:
- result = rhs == lhs
- expected = np.array([False, False, True])
- tm.assert_numpy_array_equal(result, expected)
-
- result = rhs != lhs
- expected = np.array([True, True, False])
- tm.assert_numpy_array_equal(result, expected)
-
- expected = np.array([False, False, False])
- tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
- tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
-
- expected = np.array([True, True, True])
- tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
- tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
-
- expected = np.array([False, False, False])
- tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
- tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
-
def test_value_counts_unique(self):
# GH 7735
@@ -493,23 +466,6 @@ def test_equals(self):
class TestTimedeltas(object):
_multiprocess_can_split_ = True
- def test_ops_error_str(self):
- # GH 13624
- tdi = TimedeltaIndex(['1 day', '2 days'])
-
- for l, r in [(tdi, 'a'), ('a', tdi)]:
- with pytest.raises(TypeError):
- l + r
-
- with pytest.raises(TypeError):
- l > r
-
- with pytest.raises(TypeError):
- l == r
-
- with pytest.raises(TypeError):
- l != r
-
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
@@ -564,18 +520,3 @@ def test_timedelta_ops(self):
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
Timestamp('2015-02-15')])
assert s.diff().median() == timedelta(days=6)
-
- def test_compare_timedelta_series(self):
- # regresssion test for GH5963
- s = pd.Series([timedelta(days=1), timedelta(days=2)])
- actual = s > timedelta(days=1)
- expected = pd.Series([False, True])
- tm.assert_series_equal(actual, expected)
-
- def test_compare_timedelta_ndarray(self):
- # GH11835
- periods = [Timedelta('0 days 01:00:00'), Timedelta('0 days 01:00:00')]
- arr = np.array(periods)
- result = arr[0] > arr
- expected = np.array([False, False])
- tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index 5a4d6dabbde3e..1af971e8a4326 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -203,61 +203,6 @@ def test_map(self):
exp = Int64Index([f(x) for x in rng])
tm.assert_index_equal(result, exp)
- def test_comparisons_nat(self):
-
- tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
- '1 day 00:00:01', '5 day 00:00:03'])
- tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
- '1 day 00:00:02', '5 days 00:00:03'])
- tdarr = np.array([np.timedelta64(2, 'D'),
- np.timedelta64(2, 'D'), np.timedelta64('nat'),
- np.timedelta64('nat'),
- np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
- np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
-
- cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
-
- # Check pd.NaT is handles as the same as np.nan
- for idx1, idx2 in cases:
-
- result = idx1 < idx2
- expected = np.array([True, False, False, False, True, False])
- tm.assert_numpy_array_equal(result, expected)
-
- result = idx2 > idx1
- expected = np.array([True, False, False, False, True, False])
- tm.assert_numpy_array_equal(result, expected)
-
- result = idx1 <= idx2
- expected = np.array([True, False, False, False, True, True])
- tm.assert_numpy_array_equal(result, expected)
-
- result = idx2 >= idx1
- expected = np.array([True, False, False, False, True, True])
- tm.assert_numpy_array_equal(result, expected)
-
- result = idx1 == idx2
- expected = np.array([False, False, False, False, False, True])
- tm.assert_numpy_array_equal(result, expected)
-
- result = idx1 != idx2
- expected = np.array([True, True, True, True, True, False])
- tm.assert_numpy_array_equal(result, expected)
-
- def test_comparisons_coverage(self):
- rng = timedelta_range('1 days', periods=10)
-
- result = rng < rng[3]
- exp = np.array([True, True, True] + [False] * 7)
- tm.assert_numpy_array_equal(result, exp)
-
- # raise TypeError for now
- pytest.raises(TypeError, rng.__lt__, rng[3].value)
-
- result = rng == list(rng)
- exp = rng == rng
- tm.assert_numpy_array_equal(result, exp)
-
def test_total_seconds(self):
# GH 10939
# test index
diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py
index 8c574d8f8873b..64d4940082978 100644
--- a/pandas/tests/scalar/test_timedelta.py
+++ b/pandas/tests/scalar/test_timedelta.py
@@ -276,6 +276,14 @@ def test_comparison_object_array(self):
assert res.shape == expected.shape
assert (res == expected).all()
+ def test_compare_timedelta_ndarray(self):
+ # GH11835
+ periods = [Timedelta('0 days 01:00:00'), Timedelta('0 days 01:00:00')]
+ arr = np.array(periods)
+ result = arr[0] > arr
+ expected = np.array([False, False])
+ tm.assert_numpy_array_equal(result, expected)
+
class TestTimedeltas(object):
_multiprocess_can_split_ = True
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
new file mode 100644
index 0000000000000..9db05ff590fed
--- /dev/null
+++ b/pandas/tests/series/test_arithmetic.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+from datetime import timedelta
+
+import pandas as pd
+import pandas.util.testing as tm
+
+
+class TestTimedeltaSeriesComparisons(object):
+ def test_compare_timedelta_series(self):
+ # regresssion test for GH5963
+ s = pd.Series([timedelta(days=1), timedelta(days=2)])
+ actual = s > timedelta(days=1)
+ expected = pd.Series([False, True])
+ tm.assert_series_equal(actual, expected)
+
+
+class TestPeriodSeriesArithmetic(object):
+ def test_ops_series_timedelta(self):
+ # GH 13043
+ ser = pd.Series([pd.Period('2015-01-01', freq='D'),
+ pd.Period('2015-01-02', freq='D')], name='xxx')
+ assert ser.dtype == object
+
+ expected = pd.Series([pd.Period('2015-01-02', freq='D'),
+ pd.Period('2015-01-03', freq='D')], name='xxx')
+
+ result = ser + pd.Timedelta('1 days')
+ tm.assert_series_equal(result, expected)
+
+ result = pd.Timedelta('1 days') + ser
+ tm.assert_series_equal(result, expected)
+
+ result = ser + pd.tseries.offsets.Day()
+ tm.assert_series_equal(result, expected)
+
+ result = pd.tseries.offsets.Day() + ser
+ tm.assert_series_equal(result, expected)
+
+ def test_ops_series_period(self):
+ # GH 13043
+ ser = pd.Series([pd.Period('2015-01-01', freq='D'),
+ pd.Period('2015-01-02', freq='D')], name='xxx')
+ assert ser.dtype == object
+
+ per = pd.Period('2015-01-10', freq='D')
+ # dtype will be object because of original dtype
+ expected = pd.Series([9, 8], name='xxx', dtype=object)
+ tm.assert_series_equal(per - ser, expected)
+ tm.assert_series_equal(ser - per, -expected)
+
+ s2 = pd.Series([pd.Period('2015-01-05', freq='D'),
+ pd.Period('2015-01-04', freq='D')], name='xxx')
+ assert s2.dtype == object
+
+ expected = pd.Series([4, 2], name='xxx', dtype=object)
+ tm.assert_series_equal(s2 - ser, expected)
+ tm.assert_series_equal(ser - s2, -expected)
| These are almost all cut/paste moving tests to the appropriate places. Two exceptions:
- Some "TODO: de-duplicate this with that" notes (mostly to myself)
- `TestTimedeltaIndexArithmetic.test_numeric_compat` badly needed to be split into more specific tests, so that is done here. | https://api.github.com/repos/pandas-dev/pandas/pulls/19317 | 2018-01-19T18:39:21Z | 2018-01-21T15:25:56Z | 2018-01-21T15:25:56Z | 2018-01-21T18:36:27Z |
FIX: Raise errors when wrong string arguments are passed to `resample` | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 86fc47dee09fc..148fabfb96d68 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -486,7 +486,7 @@ Groupby/Resample/Rolling
- Bug when grouping by a single column and aggregating with a class like ``list`` or ``tuple`` (:issue:`18079`)
- Fixed regression in :func:`DataFrame.groupby` which would not emit an error when called with a tuple key not in the index (:issue:`18798`)
--
+- Bug in :func:`DataFrame.resample` which silently ignored unsupported (or mistyped) options for ``label``, ``closed`` and ``convention`` (:issue:`19303`)
-
Sparse
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 5447ce7470b9d..04f5c124deccc 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -1061,6 +1061,17 @@ class TimeGrouper(Grouper):
def __init__(self, freq='Min', closed=None, label=None, how='mean',
axis=0, fill_method=None, limit=None, loffset=None,
kind=None, convention=None, base=0, **kwargs):
+ # Check for correctness of the keyword arguments which would
+ # otherwise silently use the default if misspelled
+ if label not in {None, 'left', 'right'}:
+ raise ValueError('Unsupported value {} for `label`'.format(label))
+ if closed not in {None, 'left', 'right'}:
+ raise ValueError('Unsupported value {} for `closed`'.format(
+ closed))
+ if convention not in {None, 'start', 'end', 'e', 's'}:
+ raise ValueError('Unsupported value {} for `convention`'
+ .format(convention))
+
freq = to_offset(freq)
end_types = set(['M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W'])
diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py
index e9a517605020a..6f77e7854cf76 100644
--- a/pandas/tests/test_resample.py
+++ b/pandas/tests/test_resample.py
@@ -963,6 +963,7 @@ def test_resample_basic(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
+
result = s.resample('5min', closed='right', label='right').mean()
exp_idx = date_range('1/1/2000', periods=4, freq='5min', name='index')
@@ -985,6 +986,20 @@ def test_resample_basic(self):
expect = s.groupby(grouper).agg(lambda x: x[-1])
assert_series_equal(result, expect)
+ def test_resample_string_kwargs(self):
+ # Test for issue #19303
+ rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
+ name='index')
+ s = Series(np.random.randn(14), index=rng)
+
+ # Check that wrong keyword argument strings raise an error
+ with pytest.raises(ValueError):
+ s.resample('5min', label='righttt').mean()
+ with pytest.raises(ValueError):
+ s.resample('5min', closed='righttt').mean()
+ with pytest.raises(ValueError):
+ s.resample('5min', convention='starttt').mean()
+
def test_resample_how(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
| - [x] closes #19303
- [x] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
As a first step, the initial commit just extends the resampling tests so that they fail when a misstyped string argument, e.g. for `label`, passes silently without raising.
I have the fix for the code ready and will commit when the tests have failed once. | https://api.github.com/repos/pandas-dev/pandas/pulls/19307 | 2018-01-18T23:36:00Z | 2018-01-21T15:47:34Z | 2018-01-21T15:47:34Z | 2018-01-23T14:44:15Z |
separate _libs/src/reduce.pyx to _libs.reduction | diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 9c0791c3eb8ce..1632f5d016439 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -6,7 +6,7 @@ from cython cimport Py_ssize_t
import numpy as np
cimport numpy as np
-from numpy cimport (ndarray, PyArray_NDIM, PyArray_GETITEM, PyArray_SETITEM,
+from numpy cimport (ndarray, PyArray_NDIM, PyArray_GETITEM,
PyArray_ITER_DATA, PyArray_ITER_NEXT, PyArray_IterNew,
flatiter, NPY_OBJECT,
int64_t,
@@ -57,8 +57,6 @@ cimport util
cdef int64_t NPY_NAT = util.get_nat()
from util cimport is_array, _checknull
-from libc.math cimport fabs, sqrt
-
def values_from_object(object o):
""" return my values or the object if we are say an ndarray """
@@ -1119,5 +1117,4 @@ def indices_fast(object index, ndarray[int64_t] labels, list keys,
return result
-include "reduce.pyx"
include "inference.pyx"
diff --git a/pandas/_libs/src/reduce.pyx b/pandas/_libs/reduction.pyx
similarity index 97%
rename from pandas/_libs/src/reduce.pyx
rename to pandas/_libs/reduction.pyx
index f0ec8d284ef0e..d51583c7aa473 100644
--- a/pandas/_libs/src/reduce.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -1,9 +1,24 @@
# -*- coding: utf-8 -*-
# cython: profile=False
-import numpy as np
-
from distutils.version import LooseVersion
+from cython cimport Py_ssize_t
+from cpython cimport Py_INCREF
+
+from libc.stdlib cimport malloc, free
+
+import numpy as np
+cimport numpy as np
+from numpy cimport (ndarray,
+ int64_t,
+ PyArray_SETITEM,
+ PyArray_ITER_NEXT, PyArray_ITER_DATA, PyArray_IterNew,
+ flatiter)
+np.import_array()
+
+cimport util
+from lib import maybe_convert_objects
+
is_numpy_prior_1_6_2 = LooseVersion(np.__version__) < '1.6.2'
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 2f43087f7dff9..4cdec54b9a07a 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -1,6 +1,6 @@
import numpy as np
from pandas import compat
-from pandas._libs import lib
+from pandas._libs import reduction
from pandas.core.dtypes.common import (
is_extension_type,
is_sequence)
@@ -114,7 +114,7 @@ def apply_empty_result(self):
def apply_raw(self):
try:
- result = lib.reduce(self.values, self.f, axis=self.axis)
+ result = reduction.reduce(self.values, self.f, axis=self.axis)
except Exception:
result = np.apply_along_axis(self.f, self.axis, self.values)
@@ -150,10 +150,10 @@ def apply_standard(self):
try:
labels = self.agg_axis
- result = lib.reduce(values, self.f,
- axis=self.axis,
- dummy=dummy,
- labels=labels)
+ result = reduction.reduce(values, self.f,
+ axis=self.axis,
+ dummy=dummy,
+ labels=labels)
return Series(result, index=labels)
except Exception:
pass
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 25e44589488ee..66162af1e7314 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -66,7 +66,9 @@
from pandas.plotting._core import boxplot_frame_groupby
-from pandas._libs import lib, groupby as libgroupby, Timestamp, NaT, iNaT
+from pandas._libs import (lib, reduction,
+ groupby as libgroupby,
+ Timestamp, NaT, iNaT)
from pandas._libs.lib import count_level_2d
_doc_template = """
@@ -1981,7 +1983,7 @@ def apply(self, f, data, axis=0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
- except (lib.InvalidApply):
+ except reduction.InvalidApply:
# we detect a mutation of some kind
# so take slow path
pass
@@ -2404,8 +2406,8 @@ def _aggregate_series_fast(self, obj, func):
obj = obj._take(indexer, convert=False).to_dense()
group_index = algorithms.take_nd(
group_index, indexer, allow_fill=False)
- grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
- dummy)
+ grouper = reduction.SeriesGrouper(obj, func, group_index, ngroups,
+ dummy)
result, counts = grouper.get_result()
return result, counts
@@ -2618,7 +2620,7 @@ def groupings(self):
def agg_series(self, obj, func):
dummy = obj[:0]
- grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
+ grouper = reduction.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
# ----------------------------------------------------------------------
@@ -4758,7 +4760,8 @@ def fast_apply(self, f, names):
return [], True
sdata = self._get_sorted_data()
- results, mutated = lib.apply_frame_axis0(sdata, f, names, starts, ends)
+ results, mutated = reduction.apply_frame_axis0(sdata, f, names,
+ starts, ends)
return results, mutated
diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py
index 8b95455b53d22..979b2f7a539af 100644
--- a/pandas/tests/groupby/test_bin_groupby.py
+++ b/pandas/tests/groupby/test_bin_groupby.py
@@ -9,7 +9,7 @@
from pandas import Index, isna
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as tm
-from pandas._libs import lib, groupby
+from pandas._libs import lib, groupby, reduction
def test_series_grouper():
@@ -19,7 +19,7 @@ def test_series_grouper():
labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)
- grouper = lib.SeriesGrouper(obj, np.mean, labels, 2, dummy)
+ grouper = reduction.SeriesGrouper(obj, np.mean, labels, 2, dummy)
result, counts = grouper.get_result()
expected = np.array([obj[3:6].mean(), obj[6:].mean()])
@@ -36,7 +36,7 @@ def test_series_bin_grouper():
bins = np.array([3, 6])
- grouper = lib.SeriesBinGrouper(obj, np.mean, bins, dummy)
+ grouper = reduction.SeriesBinGrouper(obj, np.mean, bins, dummy)
result, counts = grouper.get_result()
expected = np.array([obj[:3].mean(), obj[3:6].mean(), obj[6:].mean()])
@@ -127,26 +127,27 @@ def test_int_index(self):
from pandas.core.series import Series
arr = np.random.randn(100, 4)
- result = lib.reduce(arr, np.sum, labels=Index(np.arange(4)))
+ result = reduction.reduce(arr, np.sum, labels=Index(np.arange(4)))
expected = arr.sum(0)
assert_almost_equal(result, expected)
- result = lib.reduce(arr, np.sum, axis=1, labels=Index(np.arange(100)))
+ result = reduction.reduce(arr, np.sum, axis=1,
+ labels=Index(np.arange(100)))
expected = arr.sum(1)
assert_almost_equal(result, expected)
dummy = Series(0., index=np.arange(100))
- result = lib.reduce(arr, np.sum, dummy=dummy,
- labels=Index(np.arange(4)))
+ result = reduction.reduce(arr, np.sum, dummy=dummy,
+ labels=Index(np.arange(4)))
expected = arr.sum(0)
assert_almost_equal(result, expected)
dummy = Series(0., index=np.arange(4))
- result = lib.reduce(arr, np.sum, axis=1, dummy=dummy,
- labels=Index(np.arange(100)))
+ result = reduction.reduce(arr, np.sum, axis=1, dummy=dummy,
+ labels=Index(np.arange(100)))
expected = arr.sum(1)
assert_almost_equal(result, expected)
- result = lib.reduce(arr, np.sum, axis=1, dummy=dummy,
- labels=Index(np.arange(100)))
+ result = reduction.reduce(arr, np.sum, axis=1, dummy=dummy,
+ labels=Index(np.arange(100)))
assert_almost_equal(result, expected)
diff --git a/setup.py b/setup.py
index 16ca0c132eaa9..7ade1544ec5cd 100755
--- a/setup.py
+++ b/setup.py
@@ -309,6 +309,7 @@ class CheckSDist(sdist_class):
'pandas/_libs/interval.pyx',
'pandas/_libs/hashing.pyx',
'pandas/_libs/missing.pyx',
+ 'pandas/_libs/reduction.pyx',
'pandas/_libs/testing.pyx',
'pandas/_libs/window.pyx',
'pandas/_libs/skiplist.pyx',
@@ -506,6 +507,8 @@ def pxd(name):
'pandas/_libs/src/numpy_helper.h'],
'sources': ['pandas/_libs/src/parser/tokenizer.c',
'pandas/_libs/src/parser/io.c']},
+ '_libs.reduction': {
+ 'pyxfile': '_libs/reduction'},
'_libs.tslibs.period': {
'pyxfile': '_libs/tslibs/period',
'pxdfiles': ['_libs/src/util',
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19306 | 2018-01-18T23:18:30Z | 2018-01-21T17:57:37Z | 2018-01-21T17:57:37Z | 2018-01-23T04:39:54Z |
Remove duplicate is_lexsorted function | diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index f6c70027ae6f1..5f8c761157e88 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -896,38 +896,6 @@ def write_csv_rows(list data, ndarray data_index,
# ------------------------------------------------------------------------------
# Groupby-related functions
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def is_lexsorted(list list_of_arrays):
- cdef:
- int i
- Py_ssize_t n, nlevels
- int64_t k, cur, pre
- ndarray arr
-
- nlevels = len(list_of_arrays)
- n = len(list_of_arrays[0])
-
- cdef int64_t **vecs = <int64_t**> malloc(nlevels * sizeof(int64_t*))
- for i from 0 <= i < nlevels:
- arr = list_of_arrays[i]
- vecs[i] = <int64_t *> arr.data
-
- # Assume uniqueness??
- for i from 1 <= i < n:
- for k from 0 <= k < nlevels:
- cur = vecs[k][i]
- pre = vecs[k][i - 1]
- if cur == pre:
- continue
- elif cur > pre:
- break
- else:
- return False
- free(vecs)
- return True
-
-
# TODO: could do even better if we know something about the data. eg, index has
# 1-min data, binner has 5-min data, then bins are just strides in index. This
# is a general, O(max(len(values), len(binner))) method.
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 608553b9c3bf2..e50e87f8bd571 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -5,7 +5,7 @@
from sys import getsizeof
import numpy as np
-from pandas._libs import index as libindex, lib, Timestamp
+from pandas._libs import algos as libalgos, index as libindex, lib, Timestamp
from pandas.compat import range, zip, lrange, lzip, map
from pandas.compat.numpy import function as nv
@@ -1137,7 +1137,7 @@ def lexsort_depth(self):
int64_labels = [_ensure_int64(lab) for lab in self.labels]
for k in range(self.nlevels, 0, -1):
- if lib.is_lexsorted(int64_labels[:k]):
+ if libalgos.is_lexsorted(int64_labels[:k]):
return k
return 0
| `_libs.lib` and `_libs.algos` have near-identical `is_lexsorted` functions. The only differences appear to be small optimizations/modernizations in the `algos` version. AFAICT the `algos` version is only used in `tests.test_algos` ATM. This PR removes the `libs._lib` version and changes the one usage (in `indexes.multi`) to use the `algos` version.
| https://api.github.com/repos/pandas-dev/pandas/pulls/19305 | 2018-01-18T22:34:26Z | 2018-01-19T11:01:49Z | 2018-01-19T11:01:49Z | 2018-01-19T16:03:39Z |
Fix DTI comparison with None, datetime.date | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 66e88e181ac0f..89276e3c241d0 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -452,6 +452,8 @@ Datetimelike
- Bug in subtracting :class:`Series` from ``NaT`` incorrectly returning ``NaT`` (:issue:`19158`)
- Bug in :func:`Series.truncate` which raises ``TypeError`` with a monotonic ``PeriodIndex`` (:issue:`17717`)
- Bug in :func:`~DataFrame.pct_change` using ``periods`` and ``freq`` returned different length outputs (:issue:`7292`)
+- Bug in comparison of :class:`DatetimeIndex` against ``None`` or ``datetime.date`` objects raising ``TypeError`` for ``==`` and ``!=`` comparisons instead of all-``False`` and all-``True``, respectively (:issue:`19301`)
+-
Timezones
^^^^^^^^^
@@ -483,8 +485,6 @@ Numeric
- Bug in the :class:`DataFrame` constructor in which data containing very large positive or very large negative numbers was causing ``OverflowError`` (:issue:`18584`)
- Bug in :class:`Index` constructor with ``dtype='uint64'`` where int-like floats were not coerced to :class:`UInt64Index` (:issue:`18400`)
--
-
Indexing
^^^^^^^^
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 8dd41c022d163..8fd5794f2637b 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -118,8 +118,16 @@ def wrapper(self, other):
else:
if isinstance(other, list):
other = DatetimeIndex(other)
- elif not isinstance(other, (np.ndarray, Index, ABCSeries)):
- other = _ensure_datetime64(other)
+ elif not isinstance(other, (np.datetime64, np.ndarray,
+ Index, ABCSeries)):
+ # Following Timestamp convention, __eq__ is all-False
+ # and __ne__ is all True, others raise TypeError.
+ if opname == '__eq__':
+ return np.zeros(shape=self.shape, dtype=bool)
+ elif opname == '__ne__':
+ return np.ones(shape=self.shape, dtype=bool)
+ raise TypeError('%s type object %s' %
+ (type(other), str(other)))
if is_datetimelike(other):
self._assert_tzawareness_compat(other)
@@ -146,12 +154,6 @@ def wrapper(self, other):
return compat.set_function_name(wrapper, opname, cls)
-def _ensure_datetime64(other):
- if isinstance(other, np.datetime64):
- return other
- raise TypeError('%s type object %s' % (type(other), str(other)))
-
-
_midnight = time(0, 0)
diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py
index 671071b5e4945..09a6b35a0ff0e 100644
--- a/pandas/tests/indexes/datetimes/test_arithmetic.py
+++ b/pandas/tests/indexes/datetimes/test_arithmetic.py
@@ -14,6 +14,7 @@
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
+from pandas._libs import tslib
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
@@ -44,7 +45,83 @@ def addend(request):
class TestDatetimeIndexComparisons(object):
- # TODO: De-duplicate with test_comparisons_nat below
+ @pytest.mark.parametrize('other', [datetime(2016, 1, 1),
+ Timestamp('2016-01-01'),
+ np.datetime64('2016-01-01')])
+ def test_dti_cmp_datetimelike(self, other, tz):
+ dti = pd.date_range('2016-01-01', periods=2, tz=tz)
+ if tz is not None:
+ if isinstance(other, np.datetime64):
+ # no tzaware version available
+ return
+ elif isinstance(other, Timestamp):
+ other = other.tz_localize(dti.tzinfo)
+ else:
+ other = tslib._localize_pydatetime(other, dti.tzinfo)
+
+ result = dti == other
+ expected = np.array([True, False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = dti > other
+ expected = np.array([False, True])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = dti >= other
+ expected = np.array([True, True])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = dti < other
+ expected = np.array([False, False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = dti <= other
+ expected = np.array([True, False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ def dti_cmp_non_datetime(self, tz):
+ # GH#19301 by convention datetime.date is not considered comparable
+ # to Timestamp or DatetimeIndex. This may change in the future.
+ dti = pd.date_range('2016-01-01', periods=2, tz=tz)
+
+ other = datetime(2016, 1, 1).date()
+ assert not (dti == other).any()
+ assert (dti != other).all()
+ with pytest.raises(TypeError):
+ dti < other
+ with pytest.raises(TypeError):
+ dti <= other
+ with pytest.raises(TypeError):
+ dti > other
+ with pytest.raises(TypeError):
+ dti >= other
+
+ @pytest.mark.parametrize('other', [None, np.nan, pd.NaT])
+ def test_dti_eq_null_scalar(self, other, tz):
+ # GH#19301
+ dti = pd.date_range('2016-01-01', periods=2, tz=tz)
+ assert not (dti == other).any()
+
+ @pytest.mark.parametrize('other', [None, np.nan, pd.NaT])
+ def test_dti_ne_null_scalar(self, other, tz):
+ # GH#19301
+ dti = pd.date_range('2016-01-01', periods=2, tz=tz)
+ assert (dti != other).all()
+
+ @pytest.mark.parametrize('other', [None, np.nan])
+ def test_dti_cmp_null_scalar_inequality(self, tz, other):
+ # GH#19301
+ dti = pd.date_range('2016-01-01', periods=2, tz=tz)
+
+ with pytest.raises(TypeError):
+ dti < other
+ with pytest.raises(TypeError):
+ dti <= other
+ with pytest.raises(TypeError):
+ dti > other
+ with pytest.raises(TypeError):
+ dti >= other
+
def test_dti_cmp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
@@ -72,69 +149,7 @@ def test_dti_cmp_nat(self):
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
- @pytest.mark.parametrize('op', [operator.eq, operator.ne,
- operator.gt, operator.ge,
- operator.lt, operator.le])
- def test_comparison_tzawareness_compat(self, op):
- # GH#18162
- dr = pd.date_range('2016-01-01', periods=6)
- dz = dr.tz_localize('US/Pacific')
-
- with pytest.raises(TypeError):
- op(dr, dz)
- with pytest.raises(TypeError):
- op(dr, list(dz))
- with pytest.raises(TypeError):
- op(dz, dr)
- with pytest.raises(TypeError):
- op(dz, list(dr))
-
- # Check that there isn't a problem aware-aware and naive-naive do not
- # raise
- assert (dr == dr).all()
- assert (dr == list(dr)).all()
- assert (dz == dz).all()
- assert (dz == list(dz)).all()
-
- # Check comparisons against scalar Timestamps
- ts = pd.Timestamp('2000-03-14 01:59')
- ts_tz = pd.Timestamp('2000-03-14 01:59', tz='Europe/Amsterdam')
-
- assert (dr > ts).all()
- with pytest.raises(TypeError):
- op(dr, ts_tz)
-
- assert (dz > ts_tz).all()
- with pytest.raises(TypeError):
- op(dz, ts)
-
- @pytest.mark.parametrize('op', [operator.eq, operator.ne,
- operator.gt, operator.ge,
- operator.lt, operator.le])
- def test_nat_comparison_tzawareness(self, op):
- # GH#19276
- # tzaware DatetimeIndex should not raise when compared to NaT
- dti = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
- '2014-05-01', '2014-07-01'])
- expected = np.array([op == operator.ne] * len(dti))
- result = op(dti, pd.NaT)
- tm.assert_numpy_array_equal(result, expected)
-
- result = op(dti.tz_localize('US/Pacific'), pd.NaT)
- tm.assert_numpy_array_equal(result, expected)
-
- def test_comparisons_coverage(self):
- rng = date_range('1/1/2000', periods=10)
-
- # raise TypeError for now
- pytest.raises(TypeError, rng.__lt__, rng[3].value)
-
- result = rng == list(rng)
- exp = rng == rng
- tm.assert_numpy_array_equal(result, exp)
-
- def test_comparisons_nat(self):
-
+ def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
@@ -223,6 +238,71 @@ def test_comparisons_nat(self):
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
+ @pytest.mark.parametrize('op', [operator.eq, operator.ne,
+ operator.gt, operator.ge,
+ operator.lt, operator.le])
+ def test_comparison_tzawareness_compat(self, op):
+ # GH#18162
+ dr = pd.date_range('2016-01-01', periods=6)
+ dz = dr.tz_localize('US/Pacific')
+
+ with pytest.raises(TypeError):
+ op(dr, dz)
+ with pytest.raises(TypeError):
+ op(dr, list(dz))
+ with pytest.raises(TypeError):
+ op(dz, dr)
+ with pytest.raises(TypeError):
+ op(dz, list(dr))
+
+ # Check that there isn't a problem aware-aware and naive-naive do not
+ # raise
+ assert (dr == dr).all()
+ assert (dr == list(dr)).all()
+ assert (dz == dz).all()
+ assert (dz == list(dz)).all()
+
+ # Check comparisons against scalar Timestamps
+ ts = pd.Timestamp('2000-03-14 01:59')
+ ts_tz = pd.Timestamp('2000-03-14 01:59', tz='Europe/Amsterdam')
+
+ assert (dr > ts).all()
+ with pytest.raises(TypeError):
+ op(dr, ts_tz)
+
+ assert (dz > ts_tz).all()
+ with pytest.raises(TypeError):
+ op(dz, ts)
+
+ @pytest.mark.parametrize('op', [operator.eq, operator.ne,
+ operator.gt, operator.ge,
+ operator.lt, operator.le])
+ def test_nat_comparison_tzawareness(self, op):
+ # GH#19276
+ # tzaware DatetimeIndex should not raise when compared to NaT
+ dti = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
+ '2014-05-01', '2014-07-01'])
+ expected = np.array([op == operator.ne] * len(dti))
+ result = op(dti, pd.NaT)
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = op(dti.tz_localize('US/Pacific'), pd.NaT)
+ tm.assert_numpy_array_equal(result, expected)
+
+ def test_dti_cmp_int_raises(self):
+ rng = date_range('1/1/2000', periods=10)
+
+ # raise TypeError for now
+ with pytest.raises(TypeError):
+ rng < rng[3].value
+
+ def test_dti_cmp_list(self):
+ rng = date_range('1/1/2000', periods=10)
+
+ result = rng == list(rng)
+ expected = rng == rng
+ tm.assert_numpy_array_equal(result, expected)
+
class TestDatetimeIndexArithmetic(object):
| Discussed in #19288
- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19301 | 2018-01-18T17:21:09Z | 2018-02-02T11:38:05Z | 2018-02-02T11:38:05Z | 2018-02-04T16:41:08Z |
Bug: adds support for unary plus | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 1c6b698605521..f70e1198aa999 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -253,6 +253,7 @@ Current Behavior:
Other Enhancements
^^^^^^^^^^^^^^^^^^
+- Unary ``+`` now permitted for ``Series`` and ``DataFrame`` as numeric operator (:issue:`16073`)
- Better support for :func:`Dataframe.style.to_excel` output with the ``xlsxwriter`` engine. (:issue:`16149`)
- :func:`pandas.tseries.frequencies.to_offset` now accepts leading '+' signs e.g. '+1h'. (:issue:`18171`)
- :func:`MultiIndex.unique` now supports the ``level=`` argument, to get unique values from a specific index level (:issue:`17896`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index cb4bbb7b27c42..35f866c9e7d58 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -25,6 +25,7 @@
is_list_like,
is_dict_like,
is_re_compilable,
+ is_period_arraylike,
pandas_dtype)
from pandas.core.dtypes.cast import maybe_promote, maybe_upcast_putmask
from pandas.core.dtypes.inference import is_hashable
@@ -1027,10 +1028,24 @@ def _indexed_same(self, other):
def __neg__(self):
values = com._values_from_object(self)
- if values.dtype == np.bool_:
+ if is_bool_dtype(values):
arr = operator.inv(values)
- else:
+ elif (is_numeric_dtype(values) or is_timedelta64_dtype(values)):
arr = operator.neg(values)
+ else:
+ raise TypeError("Unary negative expects numeric dtype, not {}"
+ .format(values.dtype))
+ return self.__array_wrap__(arr)
+
+ def __pos__(self):
+ values = com._values_from_object(self)
+ if (is_bool_dtype(values) or is_period_arraylike(values)):
+ arr = values
+ elif (is_numeric_dtype(values) or is_timedelta64_dtype(values)):
+ arr = operator.pos(values)
+ else:
+ raise TypeError("Unary plus expects numeric dtype, not {}"
+ .format(values.dtype))
return self.__array_wrap__(arr)
def __invert__(self):
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index 9c3572f9ffe72..07ba0b681418e 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -542,66 +542,42 @@ def test_frame_pos(self):
# float
lhs = DataFrame(randn(5, 2))
- if self.engine == 'python':
- with pytest.raises(TypeError):
- result = pd.eval(expr, engine=self.engine, parser=self.parser)
- else:
- expect = lhs
- result = pd.eval(expr, engine=self.engine, parser=self.parser)
- assert_frame_equal(expect, result)
+ expect = lhs
+ result = pd.eval(expr, engine=self.engine, parser=self.parser)
+ assert_frame_equal(expect, result)
# int
lhs = DataFrame(randint(5, size=(5, 2)))
- if self.engine == 'python':
- with pytest.raises(TypeError):
- result = pd.eval(expr, engine=self.engine, parser=self.parser)
- else:
- expect = lhs
- result = pd.eval(expr, engine=self.engine, parser=self.parser)
- assert_frame_equal(expect, result)
+ expect = lhs
+ result = pd.eval(expr, engine=self.engine, parser=self.parser)
+ assert_frame_equal(expect, result)
# bool doesn't work with numexpr but works elsewhere
lhs = DataFrame(rand(5, 2) > 0.5)
- if self.engine == 'python':
- with pytest.raises(TypeError):
- result = pd.eval(expr, engine=self.engine, parser=self.parser)
- else:
- expect = lhs
- result = pd.eval(expr, engine=self.engine, parser=self.parser)
- assert_frame_equal(expect, result)
+ expect = lhs
+ result = pd.eval(expr, engine=self.engine, parser=self.parser)
+ assert_frame_equal(expect, result)
def test_series_pos(self):
expr = self.ex('+')
# float
lhs = Series(randn(5))
- if self.engine == 'python':
- with pytest.raises(TypeError):
- result = pd.eval(expr, engine=self.engine, parser=self.parser)
- else:
- expect = lhs
- result = pd.eval(expr, engine=self.engine, parser=self.parser)
- assert_series_equal(expect, result)
+ expect = lhs
+ result = pd.eval(expr, engine=self.engine, parser=self.parser)
+ assert_series_equal(expect, result)
# int
lhs = Series(randint(5, size=5))
- if self.engine == 'python':
- with pytest.raises(TypeError):
- result = pd.eval(expr, engine=self.engine, parser=self.parser)
- else:
- expect = lhs
- result = pd.eval(expr, engine=self.engine, parser=self.parser)
- assert_series_equal(expect, result)
+ expect = lhs
+ result = pd.eval(expr, engine=self.engine, parser=self.parser)
+ assert_series_equal(expect, result)
# bool doesn't work with numexpr but works elsewhere
lhs = Series(rand(5) > 0.5)
- if self.engine == 'python':
- with pytest.raises(TypeError):
- result = pd.eval(expr, engine=self.engine, parser=self.parser)
- else:
- expect = lhs
- result = pd.eval(expr, engine=self.engine, parser=self.parser)
- assert_series_equal(expect, result)
+ expect = lhs
+ result = pd.eval(expr, engine=self.engine, parser=self.parser)
+ assert_series_equal(expect, result)
def test_scalar_unary(self):
with pytest.raises(TypeError):
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 1bb8e8edffc6e..a3a799aed1c55 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -245,7 +245,7 @@ def test_ops_frame_period(self):
exp = pd.DataFrame({'A': np.array([2, 1], dtype=object),
'B': np.array([14, 13], dtype=object)})
tm.assert_frame_equal(p - df, exp)
- tm.assert_frame_equal(df - p, -exp)
+ tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame({'A': [pd.Period('2015-05', freq='M'),
pd.Period('2015-06', freq='M')],
@@ -257,4 +257,4 @@ def test_ops_frame_period(self):
exp = pd.DataFrame({'A': np.array([4, 4], dtype=object),
'B': np.array([16, 16], dtype=object)})
tm.assert_frame_equal(df2 - df, exp)
- tm.assert_frame_equal(df - df2, -exp)
+ tm.assert_frame_equal(df - df2, -1 * exp)
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index 26974b6398694..5df50f3d7835b 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -271,13 +271,50 @@ def test_logical_with_nas(self):
expected = Series([True, True])
assert_series_equal(result, expected)
- def test_neg(self):
- # what to do?
- assert_frame_equal(-self.frame, -1 * self.frame)
+ @pytest.mark.parametrize('df,expected', [
+ (pd.DataFrame({'a': [-1, 1]}), pd.DataFrame({'a': [1, -1]})),
+ (pd.DataFrame({'a': [False, True]}),
+ pd.DataFrame({'a': [True, False]})),
+ (pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}),
+ pd.DataFrame({'a': pd.Series(pd.to_timedelta([1, -1]))}))
+ ])
+ def test_neg_numeric(self, df, expected):
+ assert_frame_equal(-df, expected)
+ assert_series_equal(-df['a'], expected['a'])
+
+ @pytest.mark.parametrize('df', [
+ pd.DataFrame({'a': ['a', 'b']}),
+ pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}),
+ ])
+ def test_neg_raises(self, df):
+ with pytest.raises(TypeError):
+ (- df)
+ with pytest.raises(TypeError):
+ (- df['a'])
def test_invert(self):
assert_frame_equal(-(self.frame < 0), ~(self.frame < 0))
+ @pytest.mark.parametrize('df', [
+ pd.DataFrame({'a': [-1, 1]}),
+ pd.DataFrame({'a': [False, True]}),
+ pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}),
+ ])
+ def test_pos_numeric(self, df):
+ # GH 16073
+ assert_frame_equal(+df, df)
+ assert_series_equal(+df['a'], df['a'])
+
+ @pytest.mark.parametrize('df', [
+ pd.DataFrame({'a': ['a', 'b']}),
+ pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}),
+ ])
+ def test_pos_raises(self, df):
+ with pytest.raises(TypeError):
+ (+ df)
+ with pytest.raises(TypeError):
+ (+ df['a'])
+
def test_arith_flex_frame(self):
ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']
if not compat.PY3:
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index 1d9fa9dc15531..94da97ef45301 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -315,7 +315,7 @@ def test_ops_series_period(self):
# dtype will be object because of original dtype
expected = pd.Series([9, 8], name='xxx', dtype=object)
tm.assert_series_equal(per - ser, expected)
- tm.assert_series_equal(ser - per, -expected)
+ tm.assert_series_equal(ser - per, -1 * expected)
s2 = pd.Series([pd.Period('2015-01-05', freq='D'),
pd.Period('2015-01-04', freq='D')], name='xxx')
@@ -323,7 +323,7 @@ def test_ops_series_period(self):
expected = pd.Series([4, 2], name='xxx', dtype=object)
tm.assert_series_equal(s2 - ser, expected)
- tm.assert_series_equal(ser - s2, -expected)
+ tm.assert_series_equal(ser - s2, -1 * expected)
class TestTimestampSeriesArithmetic(object):
| - [ ] closes #16073
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Adds missing unary plus operator from #16073. Adds typechecking behavior to unary neg, along with tests for both. | https://api.github.com/repos/pandas-dev/pandas/pulls/19297 | 2018-01-18T13:02:38Z | 2018-02-08T11:32:05Z | 2018-02-08T11:32:04Z | 2018-02-08T12:53:18Z |
cleanup inconsistently used imports | diff --git a/ci/lint.sh b/ci/lint.sh
index 35b39e2abb3c6..a96e0961304e7 100755
--- a/ci/lint.sh
+++ b/ci/lint.sh
@@ -91,6 +91,15 @@ if [ "$LINT" ]; then
fi
echo "Check for invalid testing DONE"
+ # Check for imports from pandas.core.common instead
+ # of `import pandas.core.common as com`
+ echo "Check for non-standard imports"
+ grep -R --include="*.py*" -E "from pandas.core.common import " pandas
+ if [ $? = "0" ]; then
+ RET=1
+ fi
+ echo "Check for non-standard imports DONE"
+
echo "Check for use of lists instead of generators in built-in Python functions"
# Example: Avoid `any([i for i in some_iterator])` in favor of `any(i for i in some_iterator)`
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 708f903cd73cb..b50e01b0fb55a 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -28,7 +28,6 @@
is_list_like, is_sequence,
is_scalar,
is_dict_like)
-from pandas.core.common import is_null_slice, _maybe_box_datetimelike
from pandas.core.algorithms import factorize, take_1d, unique1d
from pandas.core.accessor import PandasDelegate
@@ -468,7 +467,7 @@ def tolist(self):
(for Timestamp/Timedelta/Interval/Period)
"""
if is_datetimelike(self.categories):
- return [_maybe_box_datetimelike(x) for x in self]
+ return [com._maybe_box_datetimelike(x) for x in self]
return np.array(self).tolist()
@property
@@ -1686,7 +1685,7 @@ def _slice(self, slicer):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passd (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
- if not is_null_slice(slicer[0]):
+ if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
@@ -1847,7 +1846,7 @@ def __setitem__(self, key, value):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passd (slice(None),....)
if len(key) == 2:
- if not is_null_slice(key[0]):
+ if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 4b3e74eae36b8..54d25a16a10a3 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -24,7 +24,6 @@
from pandas.compat import PYPY
from pandas.util._decorators import (Appender, cache_readonly,
deprecate_kwarg, Substitution)
-from pandas.core.common import AbstractMethodError, _maybe_box_datetimelike
from pandas.core.accessor import DirNamesMixin
@@ -46,7 +45,7 @@ class StringMixin(object):
# Formatting
def __unicode__(self):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def __str__(self):
"""
@@ -278,10 +277,10 @@ def _gotitem(self, key, ndim, subset=None):
subset to act on
"""
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
agg = aggregate
@@ -815,7 +814,7 @@ def tolist(self):
"""
if is_datetimelike(self):
- return [_maybe_box_datetimelike(x) for x in self._values]
+ return [com._maybe_box_datetimelike(x) for x in self._values]
else:
return self._values.tolist()
@@ -1238,4 +1237,4 @@ def duplicated(self, keep='first'):
# abstracts
def _update_inplace(self, result, **kwargs):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py
index 2e912b0075bfd..22c8b641cf974 100644
--- a/pandas/core/computation/align.py
+++ b/pandas/core/computation/align.py
@@ -10,7 +10,7 @@
import pandas as pd
from pandas import compat
from pandas.errors import PerformanceWarning
-from pandas.core.common import flatten
+import pandas.core.common as com
from pandas.core.computation.common import _result_type_many
@@ -117,7 +117,7 @@ def _align(terms):
"""Align a set of terms"""
try:
# flatten the parse tree (a nested list, really)
- terms = list(flatten(terms))
+ terms = list(com.flatten(terms))
except TypeError:
# can't iterate so it must just be a constant or single variable
if isinstance(terms.value, pd.core.generic.NDFrame):
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py
index 1dc19d33f3365..781101f5804e6 100644
--- a/pandas/core/computation/expressions.py
+++ b/pandas/core/computation/expressions.py
@@ -8,7 +8,8 @@
import warnings
import numpy as np
-from pandas.core.common import _values_from_object
+
+import pandas.core.common as com
from pandas.core.computation.check import _NUMEXPR_INSTALLED
from pandas.core.config import get_option
@@ -122,8 +123,8 @@ def _evaluate_numexpr(op, op_str, a, b, truediv=True,
def _where_standard(cond, a, b):
- return np.where(_values_from_object(cond), _values_from_object(a),
- _values_from_object(b))
+ return np.where(com._values_from_object(cond), com._values_from_object(a),
+ com._values_from_object(b))
def _where_numexpr(cond, a, b):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f0919871218f5..847779b1747cf 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -62,12 +62,6 @@
from pandas.core.dtypes.missing import isna, notna
-from pandas.core.common import (_try_sort,
- _default_index,
- _values_from_object,
- _maybe_box_datetimelike,
- _dict_compat,
- standardize_mapping)
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_ensure_index_from_sequences)
@@ -387,9 +381,9 @@ def __init__(self, data=None, index=None, columns=None, dtype=None,
if isinstance(data[0], Series):
index = _get_names_from_index(data)
elif isinstance(data[0], Categorical):
- index = _default_index(len(data[0]))
+ index = com._default_index(len(data[0]))
else:
- index = _default_index(len(data))
+ index = com._default_index(len(data))
mgr = _arrays_to_mgr(arrays, columns, index, columns,
dtype=dtype)
@@ -466,7 +460,7 @@ def _init_dict(self, data, index, columns, dtype=None):
else:
keys = list(data.keys())
if not isinstance(data, OrderedDict):
- keys = _try_sort(keys)
+ keys = com._try_sort(keys)
columns = data_names = Index(keys)
arrays = [data[k] for k in keys]
@@ -493,12 +487,12 @@ def _get_axes(N, K, index=index, columns=columns):
# return axes or defaults
if index is None:
- index = _default_index(N)
+ index = com._default_index(N)
else:
index = _ensure_index(index)
if columns is None:
- columns = _default_index(K)
+ columns = com._default_index(K)
else:
columns = _ensure_index(columns)
return index, columns
@@ -990,7 +984,7 @@ def to_dict(self, orient='dict', into=dict):
"columns will be omitted.", UserWarning,
stacklevel=2)
# GH16122
- into_c = standardize_mapping(into)
+ into_c = com.standardize_mapping(into)
if orient.lower().startswith('d'):
return into_c(
(k, v.to_dict(into)) for k, v in compat.iteritems(self))
@@ -1000,13 +994,13 @@ def to_dict(self, orient='dict', into=dict):
return into_c((('index', self.index.tolist()),
('columns', self.columns.tolist()),
('data', lib.map_infer(self.values.ravel(),
- _maybe_box_datetimelike)
+ com._maybe_box_datetimelike)
.reshape(self.values.shape).tolist())))
elif orient.lower().startswith('s'):
- return into_c((k, _maybe_box_datetimelike(v))
+ return into_c((k, com._maybe_box_datetimelike(v))
for k, v in compat.iteritems(self))
elif orient.lower().startswith('r'):
- return [into_c((k, _maybe_box_datetimelike(v))
+ return [into_c((k, com._maybe_box_datetimelike(v))
for k, v in zip(self.columns, np.atleast_1d(row)))
for row in self.values]
elif orient.lower().startswith('i'):
@@ -1947,30 +1941,28 @@ def transpose(self, *args, **kwargs):
# legacy pickle formats
def _unpickle_frame_compat(self, state): # pragma: no cover
- from pandas.core.common import _unpickle_array
if len(state) == 2: # pragma: no cover
series, idx = state
columns = sorted(series)
else:
series, cols, idx = state
- columns = _unpickle_array(cols)
+ columns = com._unpickle_array(cols)
- index = _unpickle_array(idx)
+ index = com._unpickle_array(idx)
self._data = self._init_dict(series, index, columns, None)
def _unpickle_matrix_compat(self, state): # pragma: no cover
- from pandas.core.common import _unpickle_array
# old unpickling
(vals, idx, cols), object_state = state
- index = _unpickle_array(idx)
- dm = DataFrame(vals, index=index, columns=_unpickle_array(cols),
+ index = com._unpickle_array(idx)
+ dm = DataFrame(vals, index=index, columns=com._unpickle_array(cols),
copy=False)
if object_state is not None:
ovals, _, ocols = object_state
objects = DataFrame(ovals, index=index,
- columns=_unpickle_array(ocols), copy=False)
+ columns=com._unpickle_array(ocols), copy=False)
dm = dm.join(objects)
@@ -2006,7 +1998,7 @@ def _get_value(self, index, col, takeable=False):
if takeable:
series = self._iget_item_cache(col)
- return _maybe_box_datetimelike(series._values[index])
+ return com._maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
@@ -3371,7 +3363,7 @@ def _maybe_casted_values(index, labels=None):
values, mask, np.nan)
return values
- new_index = _default_index(len(new_obj))
+ new_index = com._default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
@@ -6084,7 +6076,7 @@ def extract_index(data):
(lengths[0], len(index)))
raise ValueError(msg)
else:
- index = _default_index(lengths[0])
+ index = com._default_index(lengths[0])
return _ensure_index(index)
@@ -6155,7 +6147,7 @@ def _to_arrays(data, columns, coerce_float=False, dtype=None):
dtype=dtype)
elif isinstance(data[0], Categorical):
if columns is None:
- columns = _default_index(len(data))
+ columns = com._default_index(len(data))
return data, columns
elif (isinstance(data, (np.ndarray, Series, Index)) and
data.dtype.names is not None):
@@ -6179,7 +6171,7 @@ def _masked_rec_array_to_mgr(data, index, columns, dtype, copy):
if index is None:
index = _get_names_from_index(fdata)
if index is None:
- index = _default_index(len(data))
+ index = com._default_index(len(data))
index = _ensure_index(index)
if columns is not None:
@@ -6239,14 +6231,14 @@ def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
for s in data:
index = getattr(s, 'index', None)
if index is None:
- index = _default_index(len(s))
+ index = com._default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
- values = _values_from_object(s)
+ values = com._values_from_object(s)
aligned_values.append(algorithms.take_1d(values, indexer))
values = np.vstack(aligned_values)
@@ -6276,7 +6268,7 @@ def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
def _convert_object_array(content, columns, coerce_float=False, dtype=None):
if columns is None:
- columns = _default_index(len(content))
+ columns = com._default_index(len(content))
else:
if len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
@@ -6298,7 +6290,7 @@ def convert(arr):
def _get_names_from_index(data):
has_some_name = any(getattr(s, 'name', None) is not None for s in data)
if not has_some_name:
- return _default_index(len(data))
+ return com._default_index(len(data))
index = lrange(len(data))
count = 0
@@ -6333,7 +6325,7 @@ def _homogenize(data, index, dtype=None):
oindex = index.astype('O')
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
- v = _dict_compat(v)
+ v = com._dict_compat(v)
else:
v = dict(v)
v = lib.fast_multiget(v, oindex.values, default=np.nan)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 7ffef9c8a86d7..6e777281b11e1 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -30,10 +30,6 @@
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.generic import ABCSeries, ABCPanel, ABCDataFrame
-from pandas.core.common import (_count_not_none,
- _maybe_box_datetimelike, _values_from_object,
- AbstractMethodError, SettingWithCopyError,
- SettingWithCopyWarning)
from pandas.core.base import PandasObject, SelectionMixin
from pandas.core.index import (Index, MultiIndex, _ensure_index,
@@ -198,7 +194,7 @@ def _constructor(self):
"""Used when a manipulation result has the same dimensions as the
original.
"""
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def __unicode__(self):
# unicode representation based upon iterating over self
@@ -220,7 +216,7 @@ def _constructor_sliced(self):
"""Used when a manipulation result has one lower dimension(s) as the
original, such as DataFrame single columns slicing.
"""
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
@property
def _constructor_expanddim(self):
@@ -1026,7 +1022,7 @@ def _indexed_same(self, other):
for a in self._AXIS_ORDERS)
def __neg__(self):
- values = _values_from_object(self)
+ values = com._values_from_object(self)
if values.dtype == np.bool_:
arr = operator.inv(values)
else:
@@ -1035,7 +1031,7 @@ def __neg__(self):
def __invert__(self):
try:
- arr = operator.inv(_values_from_object(self))
+ arr = operator.inv(com._values_from_object(self))
return self.__array_wrap__(arr)
except Exception:
@@ -1490,7 +1486,7 @@ def __round__(self, decimals=0):
# Array Interface
def __array__(self, dtype=None):
- return _values_from_object(self)
+ return com._values_from_object(self)
def __array_wrap__(self, result, context=None):
d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
@@ -2204,7 +2200,7 @@ def _iget_item_cache(self, item):
return lower
def _box_item_values(self, key, values):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _maybe_cache_changed(self, item, value):
"""The object has called back to us saying maybe it has changed.
@@ -2397,9 +2393,10 @@ def _check_setitem_copy(self, stacklevel=4, t='setting', force=False):
)
if value == 'raise':
- raise SettingWithCopyError(t)
+ raise com.SettingWithCopyError(t)
elif value == 'warn':
- warnings.warn(t, SettingWithCopyWarning, stacklevel=stacklevel)
+ warnings.warn(t, com.SettingWithCopyWarning,
+ stacklevel=stacklevel)
def __delitem__(self, key):
"""
@@ -2696,7 +2693,7 @@ def xs(self, key, axis=0, level=None, drop_level=True):
# that means that their are list/ndarrays inside the Series!
# so just return them (GH 6394)
if not is_list_like(new_values) or self.ndim == 1:
- return _maybe_box_datetimelike(new_values)
+ return com._maybe_box_datetimelike(new_values)
result = self._constructor_sliced(
new_values, index=self.columns,
@@ -3557,7 +3554,7 @@ def filter(self, items=None, like=None, regex=None, axis=None):
"""
import re
- nkw = _count_not_none(items, like, regex)
+ nkw = com._count_not_none(items, like, regex)
if nkw > 1:
raise TypeError('Keyword arguments `items`, `like`, or `regex` '
'are mutually exclusive')
@@ -6357,7 +6354,8 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
if try_quick:
try:
- new_other = _values_from_object(self).copy()
+ new_other = com._values_from_object(self)
+ new_other = new_other.copy()
new_other[icond] = other
other = new_other
except Exception:
@@ -7318,7 +7316,7 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
rs = (data.div(data.shift(periods=periods, freq=freq, axis=axis,
**kwargs)) - 1)
if freq is None:
- mask = isna(_values_from_object(self))
+ mask = isna(com._values_from_object(self))
np.putmask(rs.values, mask, np.nan)
return rs
@@ -7778,7 +7776,7 @@ def cum_func(self, axis=None, skipna=True, *args, **kwargs):
else:
axis = self._get_axis_number(axis)
- y = _values_from_object(self).copy()
+ y = com._values_from_object(self).copy()
if (skipna and
issubclass(y.dtype.type, (np.datetime64, np.timedelta64))):
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 25e44589488ee..64ce78c78dc53 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -39,10 +39,6 @@
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.missing import isna, notna, _maybe_fill
-from pandas.core.common import (_values_from_object, AbstractMethodError,
- _default_index, _not_none, _get_callable_name,
- _asarray_tuplesafe, _pipe)
-
from pandas.core.base import (PandasObject, SelectionMixin, GroupByError,
DataError, SpecificationError)
from pandas.core.index import (Index, MultiIndex,
@@ -61,6 +57,7 @@
from pandas.io.formats.printing import pprint_thing
from pandas.util._validators import validate_kwargs
+import pandas.core.common as com
import pandas.core.algorithms as algorithms
from pandas.core.config import option_context
@@ -751,7 +748,7 @@ def __getattr__(self, attr):
b 2""")
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
- return _pipe(self, func, *args, **kwargs)
+ return com._pipe(self, func, *args, **kwargs)
plot = property(GroupByPlot)
@@ -895,7 +892,7 @@ def _iterate_slices(self):
yield self._selection_name, self._selected_obj
def transform(self, func, *args, **kwargs):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _cumcount_array(self, ascending=True):
"""
@@ -1037,7 +1034,7 @@ def _python_agg_general(self, func, *args, **kwargs):
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.core.reshape.concat import concat
@@ -1045,7 +1042,7 @@ def _concat_objects(self, keys, values, not_indexed_same=False):
def reset_identity(values):
# reset the identities of the components
# of the values to prevent aliasing
- for v in _not_none(*values):
+ for v in com._not_none(*values):
ax = v._get_axis(self.axis)
ax._reset_identity()
return values
@@ -1975,7 +1972,7 @@ def apply(self, f, data, axis=0):
group_keys = self._get_group_keys()
# oh boy
- f_name = _get_callable_name(f)
+ f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
@@ -2009,7 +2006,7 @@ def indices(self):
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
- keys = [_values_from_object(ping.group_index)
+ keys = [com._values_from_object(ping.group_index)
for ping in self.groupings]
return get_indexer_dict(label_list, keys)
@@ -2707,7 +2704,7 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None,
self.grouper = self.obj[self.name]
elif isinstance(self.grouper, (list, tuple)):
- self.grouper = _asarray_tuplesafe(self.grouper)
+ self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif is_categorical_dtype(self.grouper):
@@ -2934,7 +2931,7 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
if not any_callable and not all_in_columns_index and \
not any_arraylike and not any_groupers and \
match_axis_length and level is None:
- keys = [_asarray_tuplesafe(keys)]
+ keys = [com._asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
@@ -3229,7 +3226,7 @@ def _aggregate_multiple_funcs(self, arg, _level):
columns.append(f)
else:
# protect against callables without names
- columns.append(_get_callable_name(f))
+ columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
@@ -3829,7 +3826,7 @@ def _aggregate_generic(self, func, *args, **kwargs):
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
@@ -3891,7 +3888,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
# GH12824.
def first_not_none(values):
try:
- return next(_not_none(*values))
+ return next(com._not_none(*values))
except StopIteration:
return None
@@ -4585,7 +4582,7 @@ def groupby_series(obj, col=None):
results = concat(results, axis=1)
if not self.as_index:
- results.index = _default_index(len(results))
+ results.index = com._default_index(len(results))
return results
boxplot = boxplot_frame_groupby
@@ -4675,7 +4672,7 @@ def _aggregate_item_by_item(self, func, *args, **kwargs):
raise ValueError("axis value must be greater than 0")
def _wrap_aggregated_output(self, output, names=None):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
class NDArrayGroupBy(GroupBy):
@@ -4731,7 +4728,7 @@ def _chop(self, sdata, slice_obj):
return sdata.iloc[slice_obj]
def apply(self, f):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
class ArraySplitter(DataSplitter):
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 6d0a415f5b420..34578d7a717b1 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -41,11 +41,9 @@
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
-from pandas.core.common import (is_bool_indexer, _values_from_object,
- _asarray_tuplesafe, _not_none,
- _index_labels_to_array)
from pandas.core.base import PandasObject, IndexOpsMixin
+import pandas.core.common as com
import pandas.core.base as base
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
@@ -292,7 +290,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None,
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
- subarr = _asarray_tuplesafe(data, dtype=object)
+ subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
@@ -361,7 +359,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None,
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
# other iterable of some kind
- subarr = _asarray_tuplesafe(data, dtype=object)
+ subarr = com._asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
"""
@@ -1498,7 +1496,7 @@ def _convert_listlike_indexer(self, keyarr, kind=None):
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
- keyarr = _asarray_tuplesafe(keyarr)
+ keyarr = com._asarray_tuplesafe(keyarr)
return keyarr
_index_shared_docs['_convert_index_indexer'] = """
@@ -1736,10 +1734,10 @@ def __getitem__(self, key):
# pessimization of basic indexing.
return promote(getitem(key))
- if is_bool_indexer(key):
+ if com.is_bool_indexer(key):
key = np.asarray(key)
- key = _values_from_object(key)
+ key = com._values_from_object(key)
result = getitem(key)
if not is_scalar(result):
return promote(result)
@@ -2022,8 +2020,8 @@ def equals(self, other):
return other.equals(self)
try:
- return array_equivalent(_values_from_object(self),
- _values_from_object(other))
+ return array_equivalent(com._values_from_object(self),
+ com._values_from_object(other))
except Exception:
return False
@@ -2539,8 +2537,8 @@ def get_value(self, series, key):
# invalid type as an indexer
pass
- s = _values_from_object(series)
- k = _values_from_object(key)
+ s = com._values_from_object(series)
+ k = com._values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
try:
@@ -2573,8 +2571,8 @@ def set_value(self, arr, key, value):
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
- self._engine.set_value(_values_from_object(arr),
- _values_from_object(key), value)
+ self._engine.set_value(com._values_from_object(arr),
+ com._values_from_object(key), value)
def _get_level_values(self, level):
"""
@@ -3193,8 +3191,8 @@ def _join_multi(self, other, how, return_indexers=True):
other_is_mi = isinstance(other, MultiIndex)
# figure out join names
- self_names = _not_none(*self.names)
- other_names = _not_none(*other.names)
+ self_names = com._not_none(*self.names)
+ other_names = com._not_none(*other.names)
overlap = list(set(self_names) & set(other_names))
# need at least 1 in common, but not more than 1
@@ -3766,7 +3764,7 @@ def drop(self, labels, errors='raise'):
If none of the labels are found in the selected axis
"""
arr_dtype = 'object' if self.dtype == 'object' else None
- labels = _index_labels_to_array(labels, dtype=arr_dtype)
+ labels = com._index_labels_to_array(labels, dtype=arr_dtype)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
@@ -4001,7 +3999,7 @@ def _validate_for_numeric_binop(self, other, op, opstr):
if len(self) != len(other):
raise ValueError("cannot evaluate a numeric op with "
"unequal lengths")
- other = _values_from_object(other)
+ other = com._values_from_object(other)
if other.dtype.kind not in ['f', 'i', 'u']:
raise TypeError("cannot evaluate a numeric op "
"with a non-numeric dtype")
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 9a6210db1aacb..2d4655d84dca8 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -11,8 +11,6 @@
is_list_like,
is_interval_dtype,
is_scalar)
-from pandas.core.common import (_asarray_tuplesafe,
- _values_from_object)
from pandas.core.dtypes.missing import array_equivalent, isna
from pandas.core.algorithms import take_1d
@@ -21,6 +19,7 @@
from pandas.core.config import get_option
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core import accessor
+import pandas.core.common as com
import pandas.core.base as base
import pandas.core.missing as missing
import pandas.core.indexes.base as ibase
@@ -442,7 +441,7 @@ def get_value(self, series, key):
know what you're doing
"""
try:
- k = _values_from_object(key)
+ k = com._values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
indexer = self.get_loc(k)
return series.iloc[indexer]
@@ -620,7 +619,7 @@ def _convert_list_indexer(self, keyarr, kind=None):
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
- keyarr = _asarray_tuplesafe(keyarr)
+ keyarr = com._asarray_tuplesafe(keyarr)
if self.categories._defer_to_indexing:
return keyarr
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 7bb6708e03421..f43c6dc567f69 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -31,9 +31,7 @@
from pandas.core.dtypes.missing import isna
from pandas.core import common as com, algorithms
from pandas.core.algorithms import checked_add_with_arr
-from pandas.core.common import AbstractMethodError
from pandas.errors import NullFrequencyError
-
import pandas.io.formats.printing as printing
from pandas._libs import lib, iNaT, NaT
from pandas._libs.tslibs.period import Period
@@ -245,7 +243,7 @@ def _box_func(self):
"""
box function to get object from internal representation
"""
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _box_values(self, values):
"""
@@ -589,7 +587,7 @@ def argmax(self, axis=None, *args, **kwargs):
@property
def _formatter_func(self):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _format_attrs(self):
"""
@@ -647,7 +645,7 @@ def _add_datelike(self, other):
type(other).__name__))
def _sub_datelike(self, other):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _sub_period(self, other):
return NotImplemented
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 0349e5c0a448f..afc86a51c02b4 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -34,7 +34,6 @@
import pandas.core.dtypes.concat as _concat
from pandas.errors import PerformanceWarning
-from pandas.core.common import _values_from_object, _maybe_box
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.indexes.base import Index, _index_shared_docs
@@ -126,7 +125,7 @@ def wrapper(self, other):
self._assert_tzawareness_compat(other)
result = func(np.asarray(other))
- result = _values_from_object(result)
+ result = com._values_from_object(result)
if isinstance(other, Index):
o_mask = other.values.view('i8') == libts.iNaT
@@ -1488,8 +1487,8 @@ def get_value(self, series, key):
return series.take(locs)
try:
- return _maybe_box(self, Index.get_value(self, series, key),
- series, key)
+ return com._maybe_box(self, Index.get_value(self, series, key),
+ series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
@@ -1508,9 +1507,9 @@ def get_value_maybe_box(self, series, key):
key = Timestamp(key, tz=self.tz)
elif not isinstance(key, Timestamp):
key = Timestamp(key)
- values = self._engine.get_value(_values_from_object(series),
+ values = self._engine.get_value(com._values_from_object(series),
key, tz=self.tz)
- return _maybe_box(self, values, series, key)
+ return com._maybe_box(self, values, series, key)
def get_loc(self, key, method=None, tolerance=None):
"""
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 58b1bdb3f55ea..0e087c40cfef3 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -35,9 +35,7 @@
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.core.indexes.multi import MultiIndex
from pandas.compat.numpy import function as nv
-from pandas.core.common import (
- _all_not_none, _any_none, _asarray_tuplesafe, _count_not_none,
- is_bool_indexer, _maybe_box_datetimelike, _not_none)
+import pandas.core.common as com
from pandas.util._decorators import cache_readonly, Appender
from pandas.core.config import get_option
from pandas.tseries.frequencies import to_offset
@@ -237,7 +235,8 @@ def __new__(cls, data, closed=None,
data = maybe_convert_platform_interval(data)
left, right, infer_closed = intervals_to_interval_bounds(data)
- if _all_not_none(closed, infer_closed) and closed != infer_closed:
+ if (com._all_not_none(closed, infer_closed) and
+ closed != infer_closed):
# GH 18421
msg = ("conflicting values for closed: constructor got "
"'{closed}', inferred from data '{infer_closed}'"
@@ -602,7 +601,7 @@ def to_tuples(self, na_tuple=True):
>>> idx.to_tuples(na_tuple=False)
Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')
"""
- tuples = _asarray_tuplesafe(zip(self.left, self.right))
+ tuples = com._asarray_tuplesafe(zip(self.left, self.right))
if not na_tuple:
# GH 18756
tuples = np.where(~self._isnan, tuples, np.nan)
@@ -975,7 +974,7 @@ def get_loc(self, key, method=None):
return self._engine.get_loc(key)
def get_value(self, series, key):
- if is_bool_indexer(key):
+ if com.is_bool_indexer(key):
loc = key
elif is_list_like(key):
loc = self.get_indexer(key)
@@ -1347,7 +1346,7 @@ def _is_type_compatible(a, b):
return ((is_number(a) and is_number(b)) or
(is_ts_compat(a) and is_ts_compat(b)) or
(is_td_compat(a) and is_td_compat(b)) or
- _any_none(a, b))
+ com._any_none(a, b))
def interval_range(start=None, end=None, periods=None, freq=None,
@@ -1426,13 +1425,13 @@ def interval_range(start=None, end=None, periods=None, freq=None,
--------
IntervalIndex : an Index of intervals that are all closed on the same side.
"""
- if _count_not_none(start, end, periods) != 2:
+ if com._count_not_none(start, end, periods) != 2:
raise ValueError('Of the three parameters: start, end, and periods, '
'exactly two must be specified')
- start = _maybe_box_datetimelike(start)
- end = _maybe_box_datetimelike(end)
- endpoint = next(_not_none(start, end))
+ start = com._maybe_box_datetimelike(start)
+ end = com._maybe_box_datetimelike(end)
+ endpoint = next(com._not_none(start, end))
if not _is_valid_endpoint(start):
msg = 'start must be numeric or datetime-like, got {start}'
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index e50e87f8bd571..797774832aaa5 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -22,11 +22,6 @@
is_scalar)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.errors import PerformanceWarning, UnsortedIndexError
-from pandas.core.common import (_any_not_none,
- _values_from_object,
- is_bool_indexer,
- is_null_slice,
- is_true_slices)
import pandas.core.base as base
from pandas.util._decorators import Appender, cache_readonly, deprecate_kwarg
@@ -539,7 +534,7 @@ def _format_attrs(self):
max_seq_items=False)),
('labels', ibase.default_pprint(self._labels,
max_seq_items=False))]
- if _any_not_none(*self.names):
+ if com._any_not_none(*self.names):
attrs.append(('names', ibase.default_pprint(self.names)))
if self.sortorder is not None:
attrs.append(('sortorder', ibase.default_pprint(self.sortorder)))
@@ -863,8 +858,8 @@ def get_value(self, series, key):
from pandas.core.indexing import maybe_droplevels
# Label-based
- s = _values_from_object(series)
- k = _values_from_object(key)
+ s = com._values_from_object(series)
+ k = com._values_from_object(key)
def _try_mi(k):
# TODO: what if a level contains tuples??
@@ -1474,7 +1469,7 @@ def __getitem__(self, key):
return tuple(retval)
else:
- if is_bool_indexer(key):
+ if com.is_bool_indexer(key):
key = np.asarray(key)
sortorder = self.sortorder
else:
@@ -1612,7 +1607,7 @@ def drop(self, labels, level=None, errors='raise'):
inds.append(loc)
elif isinstance(loc, slice):
inds.extend(lrange(loc.start, loc.stop))
- elif is_bool_indexer(loc):
+ elif com.is_bool_indexer(loc):
if self.lexsort_depth == 0:
warnings.warn('dropping on a non-lexsorted multi-index'
' without a level parameter may impact '
@@ -2145,7 +2140,7 @@ def _maybe_str_to_time_stamp(key, lev):
pass
return key
- key = _values_from_object(key)
+ key = com._values_from_object(key)
key = tuple(map(_maybe_str_to_time_stamp, key, self.levels))
return self._engine.get_loc(key)
@@ -2303,7 +2298,7 @@ def partial_selection(key, indexer=None):
key = tuple(self[indexer].tolist()[0])
return (self._engine.get_loc(
- _values_from_object(key)), None)
+ com._values_from_object(key)), None)
else:
return partial_selection(key)
@@ -2463,7 +2458,7 @@ def get_locs(self, seq):
"""
# must be lexsorted to at least as many levels
- true_slices = [i for (i, s) in enumerate(is_true_slices(seq)) if s]
+ true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s]
if true_slices and true_slices[-1] >= self.lexsort_depth:
raise UnsortedIndexError('MultiIndex slicing requires the index '
'to be lexsorted: slicing on levels {0}, '
@@ -2480,7 +2475,7 @@ def _convert_to_indexer(r):
m = np.zeros(n, dtype=bool)
m[r] = True
r = m.nonzero()[0]
- elif is_bool_indexer(r):
+ elif com.is_bool_indexer(r):
if len(r) != n:
raise ValueError("cannot index with a boolean indexer "
"that is not the same length as the "
@@ -2498,7 +2493,7 @@ def _update_indexer(idxr, indexer=indexer):
for i, k in enumerate(seq):
- if is_bool_indexer(k):
+ if com.is_bool_indexer(k):
# a boolean indexer, must be the same length!
k = np.asarray(k)
indexer = _update_indexer(_convert_to_indexer(k),
@@ -2527,7 +2522,7 @@ def _update_indexer(idxr, indexer=indexer):
# no matches we are done
return Int64Index([])._values
- elif is_null_slice(k):
+ elif com.is_null_slice(k):
# empty slice
indexer = _update_indexer(None, indexer=indexer)
@@ -2594,8 +2589,8 @@ def equals(self, other):
return False
if not isinstance(other, MultiIndex):
- return array_equivalent(self._values,
- _values_from_object(_ensure_index(other)))
+ other_vals = com._values_from_object(_ensure_index(other))
+ return array_equivalent(self._values, other_vals)
if self.nlevels != other.nlevels:
return False
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 6337c2f73d5ec..5e6ebb7588ab9 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -9,10 +9,10 @@
is_bool,
is_bool_dtype,
is_scalar)
-from pandas.core.common import _asarray_tuplesafe, _values_from_object
from pandas import compat
from pandas.core import algorithms
+import pandas.core.common as com
from pandas.core.indexes.base import (
Index, InvalidIndexError, _index_shared_docs)
from pandas.util._decorators import Appender, cache_readonly
@@ -251,9 +251,9 @@ def _convert_arr_indexer(self, keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
- keyarr = _asarray_tuplesafe(keyarr)
+ keyarr = com._asarray_tuplesafe(keyarr)
if is_integer_dtype(keyarr):
- return _asarray_tuplesafe(keyarr, dtype=np.uint64)
+ return com._asarray_tuplesafe(keyarr, dtype=np.uint64)
return keyarr
@Appender(_index_shared_docs['_convert_index_indexer'])
@@ -357,9 +357,9 @@ def get_value(self, series, key):
if not is_scalar(key):
raise InvalidIndexError
- k = _values_from_object(key)
+ k = com._values_from_object(key)
loc = self.get_loc(k)
- new_values = _values_from_object(series)[loc]
+ new_values = com._values_from_object(series)[loc]
return new_values
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 10a923c056be2..1a18b86acf57f 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -13,7 +13,8 @@
from pandas import compat
from pandas.compat import lrange, range, get_range_parameters
from pandas.compat.numpy import function as nv
-from pandas.core.common import _all_none
+
+import pandas.core.common as com
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.util._decorators import Appender, cache_readonly
import pandas.core.dtypes.concat as _concat
@@ -89,7 +90,7 @@ def _ensure_int(value, field):
return new_value
- if _all_none(start, stop, step):
+ if com._all_none(start, stop, step):
msg = "RangeIndex(...) must be called with integers"
raise TypeError(msg)
elif start is None:
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 3e671731be348..b88ee88210cfe 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -17,7 +17,6 @@
_ensure_int64)
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.generic import ABCSeries
-from pandas.core.common import _maybe_box, _values_from_object
from pandas.core.indexes.base import Index
from pandas.core.indexes.numeric import Int64Index
@@ -77,7 +76,7 @@ def wrapper(self, other):
other = TimedeltaIndex(other).values
result = func(other)
- result = _values_from_object(result)
+ result = com._values_from_object(result)
if isinstance(other, Index):
o_mask = other.values.view('i8') == iNaT
@@ -710,8 +709,8 @@ def get_value(self, series, key):
return self.get_value_maybe_box(series, key)
try:
- return _maybe_box(self, Index.get_value(self, series, key),
- series, key)
+ return com._maybe_box(self, Index.get_value(self, series, key),
+ series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
@@ -727,8 +726,8 @@ def get_value(self, series, key):
def get_value_maybe_box(self, series, key):
if not isinstance(key, Timedelta):
key = Timedelta(key)
- values = self._engine.get_value(_values_from_object(series), key)
- return _maybe_box(self, values, series, key)
+ values = self._engine.get_value(com._values_from_object(series), key)
+ return com._maybe_box(self, values, series, key)
def get_loc(self, key, method=None, tolerance=None):
"""
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index e2c4043f0508d..3ca150cda83c7 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -20,9 +20,6 @@
from pandas.core.index import Index, MultiIndex
import pandas.core.common as com
-from pandas.core.common import (is_bool_indexer, _asarray_tuplesafe,
- is_null_slice, is_full_slice,
- _values_from_object)
from pandas._libs.indexing import _NDFrameIndexerBase
@@ -314,7 +311,7 @@ def _setitem_with_indexer(self, indexer, value):
# (not null slices) then we must take the split path, xref
# GH 10360
if (isinstance(ax, MultiIndex) and
- not (is_integer(i) or is_null_slice(i))):
+ not (is_integer(i) or com.is_null_slice(i))):
take_split_path = True
break
@@ -519,8 +516,8 @@ def setter(item, v):
# multi-dim object
# GH6149 (null slice), GH10408 (full bounds)
if (isinstance(pi, tuple) and
- all(is_null_slice(idx) or
- is_full_slice(idx, len(self.obj))
+ all(com.is_null_slice(idx) or
+ com.is_full_slice(idx, len(self.obj))
for idx in pi)):
s = v
else:
@@ -613,8 +610,10 @@ def can_do_equal_len():
# logic here
if (len(indexer) > info_axis and
is_integer(indexer[info_axis]) and
- all(is_null_slice(idx) for i, idx in enumerate(indexer)
- if i != info_axis) and item_labels.is_unique):
+ all(com.is_null_slice(idx)
+ for i, idx in enumerate(indexer)
+ if i != info_axis) and
+ item_labels.is_unique):
self.obj[item_labels[indexer[info_axis]]] = value
return
@@ -667,7 +666,7 @@ def _align_series(self, indexer, ser, multiindex_indexer=False):
ravel = lambda i: i.ravel() if isinstance(i, np.ndarray) else i
indexer = tuple(map(ravel, indexer))
- aligners = [not is_null_slice(idx) for idx in indexer]
+ aligners = [not com.is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
single_aligner = sum_aligners == 1
is_frame = self.obj.ndim == 2
@@ -706,7 +705,7 @@ def _align_series(self, indexer, ser, multiindex_indexer=False):
# multiple aligners (or null slices)
if is_sequence(idx) or isinstance(idx, slice):
- if single_aligner and is_null_slice(idx):
+ if single_aligner and com.is_null_slice(idx):
continue
new_ix = ax[idx]
if not is_list_like_indexer(new_ix):
@@ -767,7 +766,7 @@ def _align_frame(self, indexer, df):
if isinstance(indexer, tuple):
- aligners = [not is_null_slice(idx) for idx in indexer]
+ aligners = [not com.is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
# TODO: single_aligner is not used
single_aligner = sum_aligners == 1 # noqa
@@ -869,7 +868,7 @@ def _getitem_tuple(self, tup):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
- if is_null_slice(key):
+ if com.is_null_slice(key):
continue
retval = getattr(retval, self.name)._getitem_axis(key, axis=i)
@@ -890,7 +889,7 @@ def _multi_take_opportunity(self, tup):
for indexer, ax in zip(tup, self.obj._data.axes):
if isinstance(ax, MultiIndex):
return False
- elif is_bool_indexer(indexer):
+ elif com.is_bool_indexer(indexer):
return False
elif not ax.is_unique:
return False
@@ -915,7 +914,7 @@ def _convert_for_reindex(self, key, axis=None):
axis = self.axis or 0
labels = self.obj._get_axis(axis)
- if is_bool_indexer(key):
+ if com.is_bool_indexer(key):
key = check_bool_indexer(labels, key)
return labels[key]
else:
@@ -923,7 +922,7 @@ def _convert_for_reindex(self, key, axis=None):
keyarr = labels._convert_index_indexer(key)
else:
# asarray can be unsafe, NumPy strings are weird
- keyarr = _asarray_tuplesafe(key)
+ keyarr = com._asarray_tuplesafe(key)
if is_integer_dtype(keyarr):
# Cast the indexer to uint64 if possible so
@@ -1011,7 +1010,7 @@ def _getitem_lowerdim(self, tup):
# Slices should return views, but calling iloc/loc with a null
# slice returns a new object.
- if is_null_slice(new_key):
+ if com.is_null_slice(new_key):
return section
# This is an elided recursive call to iloc/loc/etc'
return getattr(section, self.name)[new_key]
@@ -1040,7 +1039,7 @@ def _getitem_nested_tuple(self, tup):
axis = 0
for i, key in enumerate(tup):
- if is_null_slice(key):
+ if com.is_null_slice(key):
axis += 1
continue
@@ -1113,7 +1112,7 @@ def _getitem_iterable(self, key, axis=None):
labels = self.obj._get_axis(axis)
- if is_bool_indexer(key):
+ if com.is_bool_indexer(key):
key = check_bool_indexer(labels, key)
inds, = key.nonzero()
return self.obj._take(inds, axis=axis, convert=False)
@@ -1235,7 +1234,7 @@ def _convert_to_indexer(self, obj, axis=None, is_setter=False):
elif is_list_like_indexer(obj):
- if is_bool_indexer(obj):
+ if com.is_bool_indexer(obj):
obj = check_bool_indexer(labels, obj)
inds, = obj.nonzero()
return inds
@@ -1265,7 +1264,7 @@ def _convert_to_indexer(self, obj, axis=None, is_setter=False):
raise KeyError('{mask} not in index'
.format(mask=objarr[mask]))
- return _values_from_object(indexer)
+ return com._values_from_object(indexer)
else:
try:
@@ -1336,7 +1335,7 @@ def _has_valid_type(self, key, axis):
if isinstance(key, slice):
return True
- elif is_bool_indexer(key):
+ elif com.is_bool_indexer(key):
return True
elif is_list_like_indexer(key):
@@ -1448,7 +1447,7 @@ def _has_valid_type(self, key, axis):
if isinstance(key, slice):
return True
- elif is_bool_indexer(key):
+ elif com.is_bool_indexer(key):
return True
elif is_list_like_indexer(key):
@@ -1576,7 +1575,7 @@ def _getitem_axis(self, key, axis=None):
if isinstance(key, slice):
self._has_valid_type(key, axis)
return self._get_slice_axis(key, axis=axis)
- elif is_bool_indexer(key):
+ elif com.is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
elif is_list_like_indexer(key):
@@ -1653,7 +1652,7 @@ class _iLocIndexer(_LocationIndexer):
_exception = IndexError
def _has_valid_type(self, key, axis):
- if is_bool_indexer(key):
+ if com.is_bool_indexer(key):
if hasattr(key, 'index') and isinstance(key.index, Index):
if key.index.inferred_type == 'integer':
raise NotImplementedError("iLocation based boolean "
@@ -1743,7 +1742,7 @@ def _getitem_tuple(self, tup):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
- if is_null_slice(key):
+ if com.is_null_slice(key):
axis += 1
continue
@@ -1807,7 +1806,7 @@ def _getitem_axis(self, key, axis=None):
except TypeError: # pragma: no cover
pass
- if is_bool_indexer(key):
+ if com.is_bool_indexer(key):
self._has_valid_type(key, axis)
return self._getbool_axis(key, axis=axis)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index d95062c54b4c6..516b58a26510c 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -56,7 +56,7 @@
import pandas.core.dtypes.concat as _concat
from pandas.core.dtypes.generic import ABCSeries, ABCDatetimeIndex
-from pandas.core.common import is_null_slice, _any_not_none
+import pandas.core.common as com
import pandas.core.algorithms as algos
from pandas.core.index import Index, MultiIndex, _ensure_index
@@ -591,7 +591,7 @@ def _astype(self, dtype, copy=False, errors='raise', values=None,
categories = kwargs.get('categories', None)
ordered = kwargs.get('ordered', None)
- if _any_not_none(categories, ordered):
+ if com._any_not_none(categories, ordered):
dtype = CategoricalDtype(categories, ordered)
if is_categorical_dtype(self.values):
@@ -1733,7 +1733,7 @@ def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
- if not is_null_slice(col) and col != 0:
+ if not com.is_null_slice(col) and col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
else:
@@ -2645,7 +2645,7 @@ def _slice(self, slicer):
""" return a slice of my values """
if isinstance(slicer, tuple):
col, loc = slicer
- if not is_null_slice(col) and col != 0:
+ if not com.is_null_slice(col) and col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
return self.values[slicer]
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index d1a355021f388..63989304bb5f9 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -20,7 +20,7 @@
from pandas.core.dtypes.cast import _int64_max, maybe_upcast_putmask
from pandas.core.dtypes.missing import isna, notna, na_value_for_dtype
from pandas.core.config import get_option
-from pandas.core.common import _values_from_object
+import pandas.core.common as com
_BOTTLENECK_INSTALLED = False
_MIN_BOTTLENECK_VERSION = '1.0.0'
@@ -205,7 +205,7 @@ def _get_values(values, skipna, fill_value=None, fill_value_typ=None,
if necessary copy and mask using the specified fill_value
copy = True will force the copy
"""
- values = _values_from_object(values)
+ values = com._values_from_object(values)
if isfinite:
mask = _isfinite(values)
else:
@@ -376,7 +376,7 @@ def get_median(x):
mask = notna(x)
if not skipna and not mask.all():
return np.nan
- return algos.median(_values_from_object(x[mask]))
+ return algos.median(com._values_from_object(x[mask]))
if not is_float_dtype(values):
values = values.astype('f8')
@@ -437,7 +437,7 @@ def nanstd(values, axis=None, skipna=True, ddof=1):
@bottleneck_switch(ddof=1)
def nanvar(values, axis=None, skipna=True, ddof=1):
- values = _values_from_object(values)
+ values = com._values_from_object(values)
dtype = values.dtype
mask = isna(values)
if is_any_int_dtype(values):
@@ -546,7 +546,7 @@ def nanskew(values, axis=None, skipna=True):
"""
- values = _values_from_object(values)
+ values = com._values_from_object(values)
mask = isna(values)
if not is_float_dtype(values.dtype):
values = values.astype('f8')
@@ -604,7 +604,7 @@ def nankurt(values, axis=None, skipna=True):
central moment.
"""
- values = _values_from_object(values)
+ values = com._values_from_object(values)
mask = isna(values)
if not is_float_dtype(values.dtype):
values = values.astype('f8')
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index fc04d9d291bf9..343b62940173e 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -18,9 +18,9 @@
from pandas.compat import bind_method
import pandas.core.missing as missing
+import pandas.core.common as com
from pandas.errors import NullFrequencyError
-from pandas.core.common import _values_from_object, _maybe_match_name
from pandas.core.dtypes.missing import notna, isna
from pandas.core.dtypes.common import (
needs_i8_conversion,
@@ -352,7 +352,7 @@ def na_op(x, y):
dtype = find_common_type([x.dtype, y.dtype])
result = np.empty(x.size, dtype=dtype)
mask = notna(x) & notna(y)
- result[mask] = op(x[mask], _values_from_object(y[mask]))
+ result[mask] = op(x[mask], com._values_from_object(y[mask]))
elif isinstance(x, np.ndarray):
result = np.empty(len(x), dtype=x.dtype)
mask = notna(x)
@@ -453,7 +453,7 @@ def dispatch_to_index_op(op, left, right, index_class):
def _get_series_op_result_name(left, right):
# `left` is always a pd.Series
if isinstance(right, (ABCSeries, pd.Index)):
- name = _maybe_match_name(left, right)
+ name = com._maybe_match_name(left, right)
else:
name = left.name
return name
@@ -516,7 +516,7 @@ def na_op(x, y):
if is_scalar(y):
mask = isna(x)
- y = libindex.convert_scalar(x, _values_from_object(y))
+ y = libindex.convert_scalar(x, com._values_from_object(y))
else:
mask = isna(x) | isna(y)
y = y.view('i8')
@@ -541,7 +541,7 @@ def wrapper(self, other, axis=None):
self._get_axis_number(axis)
if isinstance(other, ABCSeries):
- name = _maybe_match_name(self, other)
+ name = com._maybe_match_name(self, other)
if not self._indexed_same(other):
msg = 'Can only compare identically-labeled Series objects'
raise ValueError(msg)
@@ -593,7 +593,7 @@ def wrapper(self, other, axis=None):
.format(typ=type(other)))
# always return a full value series here
- res = _values_from_object(res)
+ res = com._values_from_object(res)
res = pd.Series(res, index=self.index, name=self.name, dtype='bool')
return res
@@ -645,7 +645,7 @@ def wrapper(self, other):
self, other = _align_method_SERIES(self, other, align_asobject=True)
if isinstance(other, ABCSeries):
- name = _maybe_match_name(self, other)
+ name = com._maybe_match_name(self, other)
is_other_int_dtype = is_integer_dtype(other.dtype)
other = fill_int(other) if is_other_int_dtype else fill_bool(other)
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 1df69576e6ff2..ae86074ce2d05 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -17,11 +17,10 @@
import pandas.core.ops as ops
import pandas.core.missing as missing
+import pandas.core.common as com
from pandas import compat
from pandas.compat import (map, zip, range, u, OrderedDict)
from pandas.compat.numpy import function as nv
-from pandas.core.common import (_try_sort, _default_index, _all_not_none,
- _any_not_none, _apply_if_callable)
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, _ensure_index,
@@ -31,7 +30,6 @@
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
-from pandas.core.ops import _op_descriptions
from pandas.core.series import Series
from pandas.core.reshape.util import cartesian_product
from pandas.util._decorators import Appender
@@ -174,7 +172,7 @@ def _init_data(self, data, copy, dtype, **kwargs):
axes = None
if isinstance(data, BlockManager):
- if _any_not_none(*passed_axes):
+ if com._any_not_none(*passed_axes):
axes = [x if x is not None else y
for x, y in zip(passed_axes, data.axes)]
mgr = data
@@ -186,7 +184,7 @@ def _init_data(self, data, copy, dtype, **kwargs):
mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)
copy = False
dtype = None
- elif is_scalar(data) and _all_not_none(*passed_axes):
+ elif is_scalar(data) and com._all_not_none(*passed_axes):
values = cast_scalar_to_array([len(x) for x in passed_axes],
data, dtype=dtype)
mgr = self._init_matrix(values, passed_axes, dtype=values.dtype,
@@ -209,7 +207,7 @@ def _init_dict(self, data, axes, dtype=None):
else:
ks = list(data.keys())
if not isinstance(data, OrderedDict):
- ks = _try_sort(ks)
+ ks = com._try_sort(ks)
haxis = Index(ks)
for k, v in compat.iteritems(data):
@@ -287,7 +285,7 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None):
return cls(**d)
def __getitem__(self, key):
- key = _apply_if_callable(key, self)
+ key = com._apply_if_callable(key, self)
if isinstance(self._info_axis, MultiIndex):
return self._getitem_multilevel(key)
@@ -325,7 +323,7 @@ def _init_matrix(self, data, axes, dtype=None, copy=False):
fixed_axes = []
for i, ax in enumerate(axes):
if ax is None:
- ax = _default_index(shape[i])
+ ax = com._default_index(shape[i])
else:
ax = _ensure_index(ax)
fixed_axes.append(ax)
@@ -601,7 +599,7 @@ def _box_item_values(self, key, values):
return self._constructor_sliced(values, **d)
def __setitem__(self, key, value):
- key = _apply_if_callable(key, self)
+ key = com._apply_if_callable(key, self)
shape = tuple(self.shape)
if isinstance(value, self._constructor_sliced):
value = value.reindex(
@@ -1545,9 +1543,9 @@ def na_op(x, y):
result = missing.fill_zeros(result, x, y, name, fill_zeros)
return result
- if name in _op_descriptions:
+ if name in ops._op_descriptions:
op_name = name.replace('__', '')
- op_desc = _op_descriptions[op_name]
+ op_desc = ops._op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' panel'
else:
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 5447ce7470b9d..c215d9d5cffcc 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -5,7 +5,7 @@
from textwrap import dedent
import pandas as pd
-from pandas.core.base import AbstractMethodError, GroupByMixin
+from pandas.core.base import GroupByMixin
from pandas.core.groupby import (BinGrouper, Grouper, _GroupBy, GroupBy,
SeriesGroupBy, groupby, PanelGroupBy,
@@ -233,7 +233,7 @@ def _convert_obj(self, obj):
return obj
def _get_binner_for_time(self):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _set_binner(self):
"""
@@ -372,10 +372,10 @@ def transform(self, arg, *args, **kwargs):
arg, *args, **kwargs)
def _downsample(self, f):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _upsample(self, f, limit=None, fill_value=None):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _gotitem(self, key, ndim, subset=None):
"""
diff --git a/pandas/core/series.py b/pandas/core/series.py
index be40f65186d2d..470dd23f26316 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -39,15 +39,6 @@
construct_1d_arraylike_from_scalar)
from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike
-from pandas.core.common import (is_bool_indexer,
- _default_index,
- _asarray_tuplesafe,
- _values_from_object,
- _maybe_match_name,
- SettingWithCopyError,
- _maybe_box_datetimelike,
- standardize_mapping,
- _any_none)
from pandas.core.index import (Index, MultiIndex, InvalidIndexError,
Float64Index, _ensure_index)
from pandas.core.indexing import check_bool_indexer, maybe_convert_indices
@@ -230,7 +221,7 @@ def __init__(self, data=None, index=None, dtype=None, name=None,
if index is None:
if not is_list_like(data):
data = [data]
- index = _default_index(len(data))
+ index = com._default_index(len(data))
# create/copy the manager
if isinstance(data, SingleBlockManager):
@@ -688,7 +679,7 @@ def __getitem__(self, key):
pass
elif key is Ellipsis:
return self
- elif is_bool_indexer(key):
+ elif com.is_bool_indexer(key):
pass
else:
@@ -762,7 +753,7 @@ def _get_with(self, key):
def _get_values_tuple(self, key):
# mpl hackaround
- if _any_none(*key):
+ if com._any_none(*key):
return self._get_values(key)
if not isinstance(self.index, MultiIndex):
@@ -787,7 +778,7 @@ def setitem(key, value):
try:
self._set_with_engine(key, value)
return
- except (SettingWithCopyError):
+ except com.SettingWithCopyError:
raise
except (KeyError, ValueError):
values = self._values
@@ -887,7 +878,7 @@ def _set_labels(self, key, value):
if isinstance(key, Index):
key = key.values
else:
- key = _asarray_tuplesafe(key)
+ key = com._asarray_tuplesafe(key)
indexer = self.index.get_indexer(key)
mask = indexer == -1
if mask.any():
@@ -939,7 +930,7 @@ def get_value(self, label, takeable=False):
def _get_value(self, label, takeable=False):
if takeable is True:
- return _maybe_box_datetimelike(self._values[label])
+ return com._maybe_box_datetimelike(self._values[label])
return self.index.get_value(self._values, label)
_get_value.__doc__ = get_value.__doc__
@@ -1039,7 +1030,7 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False):
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if drop:
- new_index = _default_index(len(self))
+ new_index = com._default_index(len(self))
if level is not None and isinstance(self.index, MultiIndex):
if not isinstance(level, (tuple, list)):
level = [level]
@@ -1182,7 +1173,7 @@ def to_dict(self, into=dict):
defaultdict(<type 'list'>, {0: 1, 1: 2, 2: 3, 3: 4})
"""
# GH16122
- into_c = standardize_mapping(into)
+ into_c = com.standardize_mapping(into)
return into_c(compat.iteritems(self))
def to_frame(self, name=None):
@@ -1260,7 +1251,7 @@ def count(self, level=None):
from pandas.core.index import _get_na_value
if level is None:
- return notna(_values_from_object(self)).sum()
+ return notna(com._values_from_object(self)).sum()
if isinstance(level, compat.string_types):
level = self.index._get_level_number(level)
@@ -1342,7 +1333,7 @@ def idxmin(self, axis=None, skipna=True, *args, **kwargs):
numpy.ndarray.argmin
"""
skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)
- i = nanops.nanargmin(_values_from_object(self), skipna=skipna)
+ i = nanops.nanargmin(com._values_from_object(self), skipna=skipna)
if i == -1:
return np.nan
return self.index[i]
@@ -1378,7 +1369,7 @@ def idxmax(self, axis=None, skipna=True, *args, **kwargs):
numpy.ndarray.argmax
"""
skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)
- i = nanops.nanargmax(_values_from_object(self), skipna=skipna)
+ i = nanops.nanargmax(com._values_from_object(self), skipna=skipna)
if i == -1:
return np.nan
return self.index[i]
@@ -1419,7 +1410,7 @@ def round(self, decimals=0, *args, **kwargs):
"""
nv.validate_round(args, kwargs)
- result = _values_from_object(self).round(decimals)
+ result = com._values_from_object(self).round(decimals)
result = self._constructor(result, index=self.index).__finalize__(self)
return result
@@ -1536,7 +1527,7 @@ def diff(self, periods=1):
-------
diffed : Series
"""
- result = algorithms.diff(_values_from_object(self), periods)
+ result = algorithms.diff(com._values_from_object(self), periods)
return self._constructor(result, index=self.index).__finalize__(self)
def autocorr(self, lag=1):
@@ -1737,7 +1728,7 @@ def _binop(self, other, func, level=None, fill_value=None):
with np.errstate(all='ignore'):
result = func(this_vals, other_vals)
- name = _maybe_match_name(self, other)
+ name = com._maybe_match_name(self, other)
result = self._constructor(result, index=new_index, name=name)
result = result.__finalize__(self)
if name is None:
@@ -1778,7 +1769,7 @@ def combine(self, other, func, fill_value=np.nan):
"""
if isinstance(other, Series):
new_index = self.index.union(other.index)
- new_name = _maybe_match_name(self, other)
+ new_name = com._maybe_match_name(self, other)
new_values = np.empty(len(new_index), dtype=self.dtype)
for i, idx in enumerate(new_index):
lv = self.get(idx, fill_value)
@@ -1823,7 +1814,7 @@ def combine_first(self, other):
this = self.reindex(new_index, copy=False)
other = other.reindex(new_index, copy=False)
# TODO: do we need name?
- name = _maybe_match_name(self, other) # noqa
+ name = com._maybe_match_name(self, other) # noqa
rs_vals = com._where_compat(isna(this), other._values, this._values)
return self._constructor(rs_vals, index=new_index).__finalize__(self)
@@ -1911,7 +1902,7 @@ def _try_kind_sort(arr):
bad = isna(arr)
good = ~bad
- idx = _default_index(len(self))
+ idx = com._default_index(len(self))
argsorted = _try_kind_sort(arr[good])
@@ -2784,7 +2775,7 @@ def isin(self, values):
dtype: bool
"""
- result = algorithms.isin(_values_from_object(self), values)
+ result = algorithms.isin(com._values_from_object(self), values)
return self._constructor(result, index=self.index).__finalize__(self)
def between(self, left, right, inclusive=True):
@@ -3253,7 +3244,7 @@ def _try_cast(arr, take_fast_path):
if isinstance(data, np.ndarray):
raise Exception('Data must be 1-dimensional')
else:
- subarr = _asarray_tuplesafe(data, dtype=dtype)
+ subarr = com._asarray_tuplesafe(data, dtype=dtype)
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py
index 49a0b8d86ad31..c7f5b0ba67c19 100644
--- a/pandas/core/sparse/frame.py
+++ b/pandas/core/sparse/frame.py
@@ -14,12 +14,10 @@
from pandas.core.dtypes.cast import maybe_upcast, find_common_type
from pandas.core.dtypes.common import _ensure_platform_int, is_scipy_sparse
-from pandas.core.common import _try_sort
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.series import Series
-from pandas.core.frame import (DataFrame, extract_index, _prep_ndarray,
- _default_index)
+from pandas.core.frame import DataFrame, extract_index, _prep_ndarray
import pandas.core.algorithms as algos
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays)
@@ -28,7 +26,7 @@
from pandas._libs.sparse import BlockIndex, get_blocks
from pandas.util._decorators import Appender
import pandas.core.ops as ops
-
+import pandas.core.common as com
_shared_doc_kwargs = dict(klass='SparseDataFrame')
@@ -133,7 +131,7 @@ def _init_dict(self, data, index, columns, dtype=None):
columns = _ensure_index(columns)
data = {k: v for k, v in compat.iteritems(data) if k in columns}
else:
- columns = Index(_try_sort(list(data.keys())))
+ columns = Index(com._try_sort(list(data.keys())))
if index is None:
index = extract_index(list(data.values()))
@@ -208,9 +206,9 @@ def _init_spmatrix(self, data, index, columns, dtype=None,
def _prep_index(self, data, index, columns):
N, K = data.shape
if index is None:
- index = _default_index(N)
+ index = com._default_index(N)
if columns is None:
- columns = _default_index(K)
+ columns = com._default_index(K)
if len(columns) != K:
raise ValueError('Column length mismatch: {columns} vs. {K}'
diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py
index b5d2c0b607444..4b649927f8f72 100644
--- a/pandas/core/sparse/series.py
+++ b/pandas/core/sparse/series.py
@@ -10,7 +10,6 @@
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.common import is_scalar
-from pandas.core.common import _values_from_object, _maybe_match_name
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, _ensure_index, InvalidIndexError
@@ -80,7 +79,7 @@ def wrapper(self, other):
def _sparse_series_op(left, right, op, name):
left, right = left.align(right, join='outer', copy=False)
new_index = left.index
- new_name = _maybe_match_name(left, right)
+ new_name = com._maybe_match_name(left, right)
result = _sparse_array_op(left.values, right.values, op, name,
series=True)
@@ -423,7 +422,7 @@ def __getitem__(self, key):
# Could not hash item, must be array-like?
pass
- key = _values_from_object(key)
+ key = com._values_from_object(key)
if self.index.nlevels > 1 and isinstance(key, tuple):
# to handle MultiIndex labels
key = self.index.get_loc(key)
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 278b220753196..5c31b9a5668ff 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -12,8 +12,8 @@
is_scalar,
is_integer,
is_re)
-from pandas.core.common import _values_from_object
+import pandas.core.common as com
from pandas.core.algorithms import take_1d
import pandas.compat as compat
from pandas.core.base import NoNewAttributesMixin
@@ -37,7 +37,7 @@
def _get_array_list(arr, others):
from pandas.core.series import Series
- if len(others) and isinstance(_values_from_object(others)[0],
+ if len(others) and isinstance(com._values_from_object(others)[0],
(list, np.ndarray, Series)):
arrays = [arr] + list(others)
else:
@@ -461,7 +461,7 @@ def rep(x, r):
return compat.text_type.__mul__(x, r)
repeats = np.asarray(repeats, dtype=object)
- result = lib.vec_binop(_values_from_object(arr), repeats, rep)
+ result = lib.vec_binop(com._values_from_object(arr), repeats, rep)
return result
@@ -1235,7 +1235,6 @@ def str_translate(arr, table, deletechars=None):
if deletechars is None:
f = lambda x: x.translate(table)
else:
- from pandas import compat
if compat.PY3:
raise ValueError("deletechars is not a valid argument for "
"str.translate in python 3. You should simply "
diff --git a/pandas/core/window.py b/pandas/core/window.py
index 5d2fa16876c11..4d6a1de60f59b 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -32,7 +32,7 @@
from pandas.core.base import (PandasObject, SelectionMixin,
GroupByMixin)
-from pandas.core.common import _asarray_tuplesafe, _count_not_none
+import pandas.core.common as com
import pandas._libs.window as _window
from pandas import compat
@@ -508,7 +508,7 @@ def _prep_window(self, **kwargs):
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
- return _asarray_tuplesafe(window).astype(float)
+ return com._asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
@@ -1908,33 +1908,33 @@ def dataframe_from_int_dict(data, frame_template):
return _flex_binary_moment(arg2, arg1, f)
-def _get_center_of_mass(com, span, halflife, alpha):
- valid_count = _count_not_none(com, span, halflife, alpha)
+def _get_center_of_mass(comass, span, halflife, alpha):
+ valid_count = com._count_not_none(comass, span, halflife, alpha)
if valid_count > 1:
- raise ValueError("com, span, halflife, and alpha "
+ raise ValueError("comass, span, halflife, and alpha "
"are mutually exclusive")
# Convert to center of mass; domain checks ensure 0 < alpha <= 1
- if com is not None:
- if com < 0:
- raise ValueError("com must satisfy: com >= 0")
+ if comass is not None:
+ if comass < 0:
+ raise ValueError("comass must satisfy: comass >= 0")
elif span is not None:
if span < 1:
raise ValueError("span must satisfy: span >= 1")
- com = (span - 1) / 2.
+ comass = (span - 1) / 2.
elif halflife is not None:
if halflife <= 0:
raise ValueError("halflife must satisfy: halflife > 0")
decay = 1 - np.exp(np.log(0.5) / halflife)
- com = 1 / decay - 1
+ comass = 1 / decay - 1
elif alpha is not None:
if alpha <= 0 or alpha > 1:
raise ValueError("alpha must satisfy: 0 < alpha <= 1")
- com = (1.0 - alpha) / alpha
+ comass = (1.0 - alpha) / alpha
else:
- raise ValueError("Must pass one of com, span, halflife, or alpha")
+ raise ValueError("Must pass one of comass, span, halflife, or alpha")
- return float(com)
+ return float(comass)
def _offset(window, center):
diff --git a/pandas/io/common.py b/pandas/io/common.py
index c2d1da5a1035d..4ba969f0abac4 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -9,7 +9,7 @@
from pandas.compat import StringIO, BytesIO, string_types, text_type
from pandas import compat
from pandas.io.formats.printing import pprint_thing
-from pandas.core.common import AbstractMethodError
+import pandas.core.common as com
from pandas.core.dtypes.common import is_number, is_file_like
# compat
@@ -66,7 +66,7 @@ def __iter__(self):
return self
def __next__(self):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
if not compat.PY3:
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index aff3e35861434..2fc648d2952c4 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -10,7 +10,7 @@
from pandas.compat import reduce
from pandas.io.formats.css import CSSResolver, CSSWarning
from pandas.io.formats.printing import pprint_thing
-from pandas.core.common import _any_not_none
+import pandas.core.common as com
from pandas.core.dtypes.common import is_float, is_scalar
from pandas.core.dtypes import missing
from pandas import Index, MultiIndex, PeriodIndex
@@ -549,7 +549,7 @@ def _format_hierarchical_rows(self):
self.rowcounter += 1
# if index labels are not empty go ahead and dump
- if _any_not_none(*index_labels) and self.header is not False:
+ if com._any_not_none(*index_labels) and self.header is not False:
for cidx, name in enumerate(index_labels):
yield ExcelCell(self.rowcounter - 1, cidx, name,
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 886a887568d69..2293032ebb8a1 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -27,7 +27,7 @@
is_list_like)
from pandas.core.dtypes.generic import ABCSparseArray
from pandas.core.base import PandasObject
-from pandas.core.common import _any_not_none, sentinel_factory
+import pandas.core.common as com
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas import compat
from pandas.compat import (StringIO, lzip, range, map, zip, u,
@@ -1277,7 +1277,7 @@ def _column_header():
if self.fmt.sparsify:
# GH3547
- sentinel = sentinel_factory()
+ sentinel = com.sentinel_factory()
else:
sentinel = None
levels = self.columns.format(sparsify=sentinel, adjoin=False,
@@ -1446,7 +1446,7 @@ def _write_hierarchical_rows(self, fmt_values, indent):
if self.fmt.sparsify:
# GH3547
- sentinel = sentinel_factory()
+ sentinel = com.sentinel_factory()
levels = frame.index.format(sparsify=sentinel, adjoin=False,
names=False)
@@ -2372,7 +2372,7 @@ def single_row_table(row): # pragma: no cover
def _has_names(index):
if isinstance(index, MultiIndex):
- return _any_not_none(*index.names)
+ return com._any_not_none(*index.names)
else:
return index.name is not None
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 2c3d92cea0ad8..58796aa30f0bf 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -27,7 +27,7 @@
from pandas.compat import range
from pandas.core.config import get_option
from pandas.core.generic import _shared_docs
-from pandas.core.common import _any_not_none, sentinel_factory
+import pandas.core.common as com
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
from pandas.util._decorators import Appender
try:
@@ -257,7 +257,8 @@ def format_attr(pair):
row_es.append(es)
head.append(row_es)
- if (self.data.index.names and _any_not_none(*self.data.index.names) and
+ if (self.data.index.names and
+ com._any_not_none(*self.data.index.names) and
not hidden_index):
index_header_row = []
@@ -1207,7 +1208,7 @@ def _get_level_lengths(index, hidden_elements=None):
Result is a dictionary of (level, inital_position): span
"""
- sentinel = sentinel_factory()
+ sentinel = com.sentinel_factory()
levels = index.format(sparsify=sentinel, adjoin=False, names=False)
if hidden_elements is None:
diff --git a/pandas/io/html.py b/pandas/io/html.py
index e7794864ccb3e..be4854bc19cc6 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -20,7 +20,7 @@
from pandas.compat import (lrange, lmap, u, string_types, iteritems,
raise_with_traceback, binary_type)
from pandas import Series
-from pandas.core.common import AbstractMethodError
+import pandas.core.common as com
from pandas.io.formats.printing import pprint_thing
_IMPORTS = False
@@ -234,7 +234,7 @@ def _text_getter(self, obj):
text : str or unicode
The text from an individual DOM node.
"""
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _parse_td(self, obj):
"""Return the td elements from a row element.
@@ -248,7 +248,7 @@ def _parse_td(self, obj):
columns : list of node-like
These are the elements of each row, i.e., the columns.
"""
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _parse_tables(self, doc, match, attrs):
"""Return all tables from the parsed DOM.
@@ -275,7 +275,7 @@ def _parse_tables(self, doc, match, attrs):
tables : list of node-like
A list of <table> elements to be parsed into raw data.
"""
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _parse_tr(self, table):
"""Return the list of row elements from the parsed table element.
@@ -290,7 +290,7 @@ def _parse_tr(self, table):
rows : list of node-like
A list row elements of a table, usually <tr> or <th> elements.
"""
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _parse_thead(self, table):
"""Return the header of a table.
@@ -305,7 +305,7 @@ def _parse_thead(self, table):
thead : node-like
A <thead>...</thead> element.
"""
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _parse_tbody(self, table):
"""Return the body of the table.
@@ -320,7 +320,7 @@ def _parse_tbody(self, table):
tbody : node-like
A <tbody>...</tbody> element.
"""
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _parse_tfoot(self, table):
"""Return the footer of the table if any.
@@ -335,7 +335,7 @@ def _parse_tfoot(self, table):
tfoot : node-like
A <tfoot>...</tfoot> element.
"""
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _build_doc(self):
"""Return a tree-like object that can be used to iterate over the DOM.
@@ -344,7 +344,7 @@ def _build_doc(self):
-------
obj : tree-like
"""
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _build_table(self, table):
header = self._parse_raw_thead(table)
diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py
index 6d35fc5769331..e3a1321336fb3 100644
--- a/pandas/io/json/json.py
+++ b/pandas/io/json/json.py
@@ -12,7 +12,7 @@
_infer_compression, _stringify_path,
BaseIterator)
from pandas.io.parsers import _validate_integer
-from pandas.core.common import AbstractMethodError
+import pandas.core.common as com
from pandas.core.reshape.concat import concat
from pandas.io.formats.printing import pprint_thing
from .normalize import _convert_to_line_delimits
@@ -93,7 +93,7 @@ def __init__(self, obj, orient, date_format, double_precision,
self._format_axes()
def _format_axes(self):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def write(self):
return self._write(self.obj, self.orient, self.double_precision,
@@ -648,7 +648,7 @@ def _convert_axes(self):
setattr(self.obj, axis, new_axis)
def _try_convert_types(self):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _try_convert_data(self, name, data, use_dtypes=True,
convert_dates=True):
@@ -761,7 +761,7 @@ def _try_convert_to_date(self, data):
return data, False
def _try_convert_dates(self):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
class SeriesParser(Parser):
diff --git a/pandas/io/json/table_schema.py b/pandas/io/json/table_schema.py
index 89b7a1de8acfc..01f7db7d68664 100644
--- a/pandas/io/json/table_schema.py
+++ b/pandas/io/json/table_schema.py
@@ -8,7 +8,7 @@
import pandas._libs.json as json
from pandas import DataFrame
from pandas.api.types import CategoricalDtype
-from pandas.core.common import _all_not_none
+import pandas.core.common as com
from pandas.core.dtypes.common import (
is_integer_dtype, is_timedelta64_dtype, is_numeric_dtype,
is_bool_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
@@ -69,7 +69,7 @@ def as_json_table_type(x):
def set_default_names(data):
"""Sets index names to 'index' for regular, or 'level_x' for Multi"""
- if _all_not_none(*data.index.names):
+ if com._all_not_none(*data.index.names):
nms = data.index.names
if len(nms) == 1 and data.index.name == 'index':
warnings.warn("Index name of 'index' is not round-trippable")
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 1d3fd8552eeb7..4508d5c1e1781 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -4,7 +4,7 @@
from distutils.version import LooseVersion
from pandas import DataFrame, RangeIndex, Int64Index, get_option
from pandas.compat import string_types
-from pandas.core.common import AbstractMethodError
+import pandas.core.common as com
from pandas.io.common import get_filepath_or_buffer, is_s3_url
@@ -64,10 +64,10 @@ def validate_dataframe(df):
raise ValueError("Index level names must be strings")
def write(self, df, path, compression, **kwargs):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def read(self, path, columns=None, **kwargs):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
class PyArrowImpl(BaseImpl):
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 0d2c4a3e9f629..5135bb01fb378 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -30,7 +30,7 @@
from pandas.core.frame import DataFrame
from pandas.core.arrays import Categorical
from pandas.core import algorithms
-from pandas.core.common import AbstractMethodError
+import pandas.core.common as com
from pandas.io.date_converters import generic_parser
from pandas.errors import ParserWarning, ParserError, EmptyDataError
from pandas.io.common import (get_filepath_or_buffer, is_file_like,
@@ -1010,7 +1010,7 @@ def _make_engine(self, engine='c'):
self._engine = klass(self.f, **self.options)
def _failover_to_python(self):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def read(self, nrows=None):
nrows = _validate_integer('nrows', nrows)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index c8490167022e5..106823199ee93 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -34,7 +34,7 @@
from pandas.core.base import StringMixin
from pandas.io.formats.printing import adjoin, pprint_thing
from pandas.errors import PerformanceWarning
-from pandas.core.common import _asarray_tuplesafe, _all_none
+import pandas.core.common as com
from pandas.core.algorithms import match, unique
from pandas.core.arrays.categorical import (Categorical,
_factorize_from_iterables)
@@ -903,7 +903,7 @@ def remove(self, key, where=None, start=None, stop=None):
raise KeyError('No object named %s in the file' % key)
# remove the node
- if _all_none(where, start, stop):
+ if com._all_none(where, start, stop):
s.group._f_remove(recursive=True)
# delete from the table
@@ -2368,7 +2368,7 @@ def delete(self, where=None, start=None, stop=None, **kwargs):
support fully deleting the node in its entirety (only) - where
specification must be None
"""
- if _all_none(where, start, stop):
+ if com._all_none(where, start, stop):
self._handle.remove_node(self.group, recursive=True)
return None
@@ -3844,7 +3844,7 @@ def read(self, where=None, columns=None, **kwargs):
tuple_index = long_index.values
unique_tuples = lib.fast_unique(tuple_index)
- unique_tuples = _asarray_tuplesafe(unique_tuples)
+ unique_tuples = com._asarray_tuplesafe(unique_tuples)
indexer = match(unique_tuples, tuple_index)
indexer = _ensure_platform_int(indexer)
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 3094d7d0ab1c6..8b03d6ddde4ec 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -10,6 +10,7 @@
import numpy as np
from pandas.util._decorators import cache_readonly
+import pandas.core.common as com
from pandas.core.base import PandasObject
from pandas.core.config import get_option
from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike
@@ -21,7 +22,6 @@
is_iterator)
from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame
-from pandas.core.common import AbstractMethodError, _try_sort, _any_not_none
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
@@ -225,7 +225,7 @@ def _iter_data(self, data=None, keep_index=False, fillna=None):
# TODO: unused?
# if self.sort_columns:
- # columns = _try_sort(data.columns)
+ # columns = com._try_sort(data.columns)
# else:
# columns = data.columns
@@ -367,7 +367,7 @@ def _compute_plot_data(self):
self.data = numeric_data
def _make_plot(self):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _add_table(self):
if self.table is False:
@@ -609,7 +609,7 @@ def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds):
def _get_index_name(self):
if isinstance(self.data.index, MultiIndex):
name = self.data.index.names
- if _any_not_none(*name):
+ if com._any_not_none(*name):
name = ','.join(pprint_thing(x) for x in name)
else:
name = None
@@ -957,7 +957,7 @@ def _make_plot(self):
it = self._iter_data()
stacking_id = self._get_stacking_id()
- is_errorbar = _any_not_none(*self.errors.values())
+ is_errorbar = com._any_not_none(*self.errors.values())
colors = self._get_colors()
for i, (label, y) in enumerate(it):
@@ -2182,7 +2182,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
layout=layout)
_axes = _flatten(axes)
- for i, col in enumerate(_try_sort(data.columns)):
+ for i, col in enumerate(com._try_sort(data.columns)):
ax = _axes[i]
ax.hist(data[col].dropna().values, bins=bins, **kwds)
ax.set_title(col)
diff --git a/pandas/plotting/_style.py b/pandas/plotting/_style.py
index 887202e22b4e0..426b29a8840f4 100644
--- a/pandas/plotting/_style.py
+++ b/pandas/plotting/_style.py
@@ -44,12 +44,12 @@ def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
if isinstance(colors, compat.string_types):
colors = list(colors)
elif color_type == 'random':
- from pandas.core.common import _random_state
+ import pandas.core.common as com
def random_color(column):
""" Returns a random color represented as a list of length 3"""
# GH17525 use common._random_state to avoid resetting the seed
- rs = _random_state(column)
+ rs = com._random_state(column)
return rs.rand(3).tolist()
colors = lmap(random_color, lrange(num_colors))
diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py
index 0ca25735fc03f..da881e6f29bc9 100644
--- a/pandas/tests/frame/test_to_csv.py
+++ b/pandas/tests/frame/test_to_csv.py
@@ -9,7 +9,7 @@
import numpy as np
from pandas.compat import (lmap, range, lrange, StringIO, u)
-from pandas.core.common import _all_none
+import pandas.core.common as com
from pandas.errors import ParserError
from pandas import (DataFrame, Index, Series, MultiIndex, Timestamp,
date_range, read_csv, compat, to_datetime)
@@ -572,7 +572,7 @@ def _make_frame(names=None):
df = _make_frame(True)
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
- assert _all_none(*result.columns.names)
+ assert com._all_none(*result.columns.names)
result.columns.names = df.columns.names
assert_frame_equal(df, result)
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index 9895ee06a22c0..e8a7bc50d8e3c 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -6,7 +6,7 @@
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, date_range, timedelta_range, Categorical)
from pandas.compat import lzip
-from pandas.core.common import _asarray_tuplesafe
+import pandas.core.common as com
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@@ -1177,7 +1177,7 @@ def test_to_tuples(self, tuples):
# GH 18756
idx = IntervalIndex.from_tuples(tuples)
result = idx.to_tuples()
- expected = Index(_asarray_tuplesafe(tuples))
+ expected = Index(com._asarray_tuplesafe(tuples))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('tuples', [
@@ -1193,7 +1193,7 @@ def test_to_tuples_na(self, tuples, na_tuple):
result = idx.to_tuples(na_tuple=na_tuple)
# check the non-NA portion
- expected_notna = Index(_asarray_tuplesafe(tuples[:-1]))
+ expected_notna = Index(com._asarray_tuplesafe(tuples[:-1]))
result_notna = result[:-1]
tm.assert_index_equal(result_notna, expected_notna)
diff --git a/pandas/tests/io/parser/test_parsers.py b/pandas/tests/io/parser/test_parsers.py
index 0ea4757b10e94..ec240531925e3 100644
--- a/pandas/tests/io/parser/test_parsers.py
+++ b/pandas/tests/io/parser/test_parsers.py
@@ -4,7 +4,7 @@
import pandas.util.testing as tm
from pandas import read_csv, read_table, DataFrame
-from pandas.core.common import AbstractMethodError
+import pandas.core.common as com
from pandas._libs.lib import Timestamp
from pandas.compat import StringIO
@@ -43,7 +43,7 @@ def read_table(self, *args, **kwargs):
raise NotImplementedError
def float_precision_choices(self):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def setup_method(self, method):
self.dirpath = tm.get_data_path()
diff --git a/pandas/tests/scalar/test_interval.py b/pandas/tests/scalar/test_interval.py
index 23dad9736dac5..c9e6e84d226a8 100644
--- a/pandas/tests/scalar/test_interval.py
+++ b/pandas/tests/scalar/test_interval.py
@@ -2,7 +2,7 @@
import numpy as np
from pandas import Interval, Timestamp, Timedelta
-from pandas.core.common import _any_none
+import pandas.core.common as com
import pytest
import pandas.util.testing as tm
@@ -197,6 +197,6 @@ def test_constructor_errors_tz(self, tz_left, tz_right):
# GH 18538
left = Timestamp('2017-01-01', tz=tz_left)
right = Timestamp('2017-01-02', tz=tz_right)
- error = TypeError if _any_none(tz_left, tz_right) else ValueError
+ error = TypeError if com._any_none(tz_left, tz_right) else ValueError
with pytest.raises(error):
Interval(left, right)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 6b3b519d49f7f..b1e3177547ac6 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -17,7 +17,7 @@
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
-from pandas.core.common import _asarray_tuplesafe
+import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
@@ -217,7 +217,8 @@ def test_factorize_tuple_list(self, data, expected_label, expected_level):
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
- expected_level_array = _asarray_tuplesafe(expected_level, dtype=object)
+ expected_level_array = com._asarray_tuplesafe(expected_level,
+ dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py
index e9a517605020a..515850c14ecd6 100644
--- a/pandas/tests/test_resample.py
+++ b/pandas/tests/test_resample.py
@@ -20,9 +20,10 @@
from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame
from pandas.compat import range, lrange, zip, product, OrderedDict
-from pandas.core.base import SpecificationError, AbstractMethodError
+from pandas.core.base import SpecificationError
from pandas.errors import UnsupportedFunctionCall
from pandas.core.groupby import DataError
+import pandas.core.common as com
from pandas.tseries.frequencies import to_offset
from pandas.core.indexes.datetimes import date_range
@@ -726,7 +727,7 @@ def index(self, _index_start, _index_end, _index_freq):
@pytest.fixture
def _series_name(self):
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
@pytest.fixture
def _static_values(self, index):
diff --git a/pandas/tests/util/test_util.py b/pandas/tests/util/test_util.py
index 8da2b401fc848..3b0a428218771 100644
--- a/pandas/tests/util/test_util.py
+++ b/pandas/tests/util/test_util.py
@@ -8,7 +8,7 @@
import pytest
from pandas.compat import intern
-from pandas.core.common import _all_none
+import pandas.core.common as com
from pandas.util._move import move_into_mutable_buffer, BadMove, stolenbuf
from pandas.util._decorators import deprecate_kwarg, make_signature
from pandas.util._validators import (validate_args, validate_kwargs,
@@ -438,7 +438,7 @@ def test_set_locale(self):
pytest.skip("Only a single locale found, no point in "
"trying to test setting another locale")
- if _all_none(*self.current_locale):
+ if com._all_none(*self.current_locale):
# Not sure why, but on some travis runs with pytest,
# getlocale() returned (None, None).
pytest.skip("Current locale is not set.")
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index e6b9f66c094c1..ec206e0997d0b 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -9,7 +9,7 @@
from pandas.core.dtypes.generic import ABCSeries, ABCDatetimeIndex, ABCPeriod
from pandas.core.tools.datetimes import to_datetime
-from pandas.core.common import AbstractMethodError
+import pandas.core.common as com
# import after tools, dateutil check
from dateutil.easter import easter
@@ -1148,7 +1148,7 @@ def apply(self, other):
def _apply(self, n, other):
"""Handle specific apply logic for child classes"""
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
@apply_index_wraps
def apply_index(self, i):
@@ -1182,11 +1182,11 @@ def _get_roll(self, i, before_day_of_month, after_day_of_month):
The roll array is based on the fact that i gets rolled back to
the first day of the month.
"""
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
def _apply_index_days(self, i, roll):
"""Apply the correct day for each date in i"""
- raise AbstractMethodError(self)
+ raise com.AbstractMethodError(self)
class SemiMonthEnd(SemiMonthOffset):
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 1bea25a16ca1e..30915f7891c8c 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -32,7 +32,7 @@
is_list_like)
from pandas.io.formats.printing import pprint_thing
from pandas.core.algorithms import take_1d
-from pandas.core.common import _all_not_none
+import pandas.core.common as com
import pandas.compat as compat
from pandas.compat import (
@@ -484,7 +484,7 @@ def set_locale(new_locale, lc_var=locale.LC_ALL):
except ValueError:
yield new_locale
else:
- if _all_not_none(*normalized_locale):
+ if com._all_not_none(*normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
| There are a bunch of modules that will do both `import pandas.core.common as com` and `from pandas.core.common import _whatever`, then sprinkled throughout we'll see both `_whatever` and `com._whatever`. This PR tracks down a bunch of those and standardizes on `com._whatever`.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19292 | 2018-01-18T04:26:04Z | 2018-01-21T16:43:13Z | 2018-01-21T16:43:13Z | 2022-11-15T19:24:38Z |
DOC: add `source activate` for older versions of Anaconda and fix a typo | diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index cdbbad6eb75d6..258ab874cafcf 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -171,6 +171,9 @@ We'll now kick off a three-step process:
# Create and activate the build environment
conda env create -f ci/environment-dev.yaml
conda activate pandas-dev
+
+ # or with older versions of Anaconda:
+ source activate pandas-dev
# Build and install pandas
python setup.py build_ext --inplace -j 4
@@ -456,7 +459,7 @@ Here are *some* of the more common ``cpplint`` issues:
- we restrict line-length to 80 characters to promote readability
- every header file must include a header guard to avoid name collisions if re-included
-:ref:`Continuous Integration <contributing.ci>`. will run the
+:ref:`Continuous Integration <contributing.ci>` will run the
`cpplint <https://pypi.python.org/pypi/cpplint>`_ tool
and report any stylistic errors in your code. Therefore, it is helpful before
submitting code to run the check yourself::
| In the contrib docs, `conda activate` should be `source activate`, and there was also an extraneous period after the "continuous integration" heading. | https://api.github.com/repos/pandas-dev/pandas/pulls/19282 | 2018-01-17T13:57:23Z | 2018-01-19T08:57:42Z | 2018-01-19T08:57:42Z | 2018-01-19T08:57:42Z |
BUG: timezone comparisions are inconsistent, manifesting in bugs in .concat | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 853d5cee11cd1..a93e0b1a3b0dd 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -427,7 +427,6 @@ Conversion
- Bug in :class:`Index` multiplication and division methods where operating with a ``Series`` would return an ``Index`` object instead of a ``Series`` object (:issue:`19042`)
--
-
- Bug in ``.astype()`` to non-ns timedelta units would hold the incorrect dtype (:issue:`19176`, :issue:`19223`, :issue:`12425`)
- Bug in subtracting :class:`Series` from ``NaT`` incorrectly returning ``NaT`` (:issue:`19158`)
@@ -503,6 +502,7 @@ Reshaping
- Bug in :func:`Dataframe.pivot_table` which fails when the ``aggfunc`` arg is of type string. The behavior is now consistent with other methods like ``agg`` and ``apply`` (:issue:`18713`)
- Bug in :func:`DataFrame.merge` in which merging using ``Index`` objects as vectors raised an Exception (:issue:`19038`)
- Bug in :func:`DataFrame.stack`, :func:`DataFrame.unstack`, :func:`Series.unstack` which were not returning subclasses (:issue:`15563`)
+- Bug in timezone comparisons, manifesting as a conversion of the index to UTC in ``.concat()`` (:issue:`18523`)
-
Numeric
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index f1da60057186c..e1ffd450c9a68 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -6,7 +6,7 @@ cimport cython
import cython
from numpy cimport ndarray
from tslib import Timestamp
-from tslibs.timezones cimport get_timezone
+from tslibs.timezones cimport tz_compare
from cpython.object cimport (Py_EQ, Py_NE, Py_GT, Py_LT, Py_GE, Py_LE,
PyObject_RichCompare)
@@ -131,7 +131,7 @@ cdef class Interval(IntervalMixin):
if not left <= right:
raise ValueError('left side of interval must be <= right side')
if (isinstance(left, Timestamp) and
- get_timezone(left.tzinfo) != get_timezone(right.tzinfo)):
+ not tz_compare(left.tzinfo, right.tzinfo)):
# GH 18538
msg = ("left and right must have the same time zone, got "
"'{left_tz}' and '{right_tz}'")
diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx
index b74b3a79fd69a..e15f276b39bf8 100644
--- a/pandas/_libs/src/inference.pyx
+++ b/pandas/_libs/src/inference.pyx
@@ -5,7 +5,7 @@ cimport cython
from tslibs.nattype import NaT
from tslibs.conversion cimport convert_to_tsobject
from tslibs.timedeltas cimport convert_to_timedelta64
-from tslibs.timezones cimport get_timezone
+from tslibs.timezones cimport get_timezone, tz_compare
from datetime import datetime, timedelta
iNaT = util.get_nat()
@@ -907,7 +907,7 @@ cpdef bint is_datetime_with_singletz_array(ndarray values):
val = values[j]
if val is not NaT:
tz = getattr(val, 'tzinfo', None)
- if base_tz != tz and base_tz != get_timezone(tz):
+ if not tz_compare(base_tz, tz):
return False
break
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 53abdd013ec37..9cfe41172fedc 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -35,7 +35,7 @@ from timedeltas cimport cast_from_unit
from timezones cimport (is_utc, is_tzlocal, is_fixed_offset,
treat_tz_as_dateutil, treat_tz_as_pytz,
get_utcoffset, get_dst_info,
- get_timezone, maybe_get_tz)
+ get_timezone, maybe_get_tz, tz_compare)
from parsing import parse_datetime_string
from nattype import nat_strings, NaT
@@ -169,7 +169,7 @@ def datetime_to_datetime64(ndarray[object] values):
elif PyDateTime_Check(val):
if val.tzinfo is not None:
if inferred_tz is not None:
- if get_timezone(val.tzinfo) != inferred_tz:
+ if not tz_compare(val.tzinfo, inferred_tz):
raise ValueError('Array must be all same time zone')
else:
inferred_tz = get_timezone(val.tzinfo)
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index de31643742d87..1ddb299598fd0 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -33,7 +33,8 @@ from np_datetime cimport (reverse_ops, cmp_scalar, check_dts_bounds,
is_leapyear)
from timedeltas import Timedelta
from timedeltas cimport delta_to_nanoseconds
-from timezones cimport get_timezone, is_utc, maybe_get_tz, treat_tz_as_pytz
+from timezones cimport (
+ get_timezone, is_utc, maybe_get_tz, treat_tz_as_pytz, tz_compare)
# ----------------------------------------------------------------------
# Constants
@@ -266,7 +267,7 @@ cdef class _Timestamp(datetime):
other = Timestamp(other)
# validate tz's
- if get_timezone(self.tzinfo) != get_timezone(other.tzinfo):
+ if not tz_compare(self.tzinfo, other.tzinfo):
raise TypeError("Timestamp subtraction must have the "
"same timezones or no timezones")
diff --git a/pandas/_libs/tslibs/timezones.pxd b/pandas/_libs/tslibs/timezones.pxd
index 95e0474b3a174..67353f3eec614 100644
--- a/pandas/_libs/tslibs/timezones.pxd
+++ b/pandas/_libs/tslibs/timezones.pxd
@@ -7,6 +7,7 @@ cdef bint is_tzlocal(object tz)
cdef bint treat_tz_as_pytz(object tz)
cdef bint treat_tz_as_dateutil(object tz)
+cpdef bint tz_compare(object start, object end)
cpdef object get_timezone(object tz)
cpdef object maybe_get_tz(object tz)
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx
index fdcf40337fab9..242b8262a8721 100644
--- a/pandas/_libs/tslibs/timezones.pyx
+++ b/pandas/_libs/tslibs/timezones.pyx
@@ -275,7 +275,7 @@ cdef object get_dst_info(object tz):
def infer_tzinfo(start, end):
if start is not None and end is not None:
tz = start.tzinfo
- if not (get_timezone(tz) == get_timezone(end.tzinfo)):
+ if not tz_compare(tz, end.tzinfo):
msg = 'Inputs must both have the same timezone, {tz1} != {tz2}'
raise AssertionError(msg.format(tz1=tz, tz2=end.tzinfo))
elif start is not None:
@@ -285,3 +285,32 @@ def infer_tzinfo(start, end):
else:
tz = None
return tz
+
+
+cpdef bint tz_compare(object start, object end):
+ """
+ Compare string representations of timezones
+
+ The same timezone can be represented as different instances of
+ timezones. For example
+ `<DstTzInfo 'Europe/Paris' LMT+0:09:00 STD>` and
+ `<DstTzInfo 'Europe/Paris' CET+1:00:00 STD>` are essentially same
+ timezones but aren't evaluted such, but the string representation
+ for both of these is `'Europe/Paris'`.
+
+ This exists only to add a notion of equality to pytz-style zones
+ that is compatible with the notion of equality expected of tzinfo
+ subclasses.
+
+ Parameters
+ ----------
+ start : tzinfo
+ end : tzinfo
+
+ Returns:
+ -------
+ compare : bint
+
+ """
+ # GH 18523
+ return get_timezone(start) == get_timezone(end)
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index d83d2d2c93ec8..4ec929947783c 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -521,8 +521,7 @@ def _generate(cls, start, end, periods, name, offset,
tz = tz.localize(date.replace(tzinfo=None)).tzinfo
if tz is not None and inferred_tz is not None:
- if not (timezones.get_timezone(inferred_tz) ==
- timezones.get_timezone(tz)):
+ if not timezones.tz_compare(inferred_tz, tz):
raise AssertionError("Inferred time zone not equal to passed "
"time zone")
@@ -1192,7 +1191,7 @@ def _maybe_utc_convert(self, other):
raise TypeError('Cannot join tz-naive with tz-aware '
'DatetimeIndex')
- if self.tz != other.tz:
+ if not timezones.tz_compare(self.tz, other.tz):
this = self.tz_convert('UTC')
other = other.tz_convert('UTC')
return this, other
@@ -1296,7 +1295,7 @@ def __iter__(self):
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
- if self.tz != other.tz:
+ if not timezones.tz_compare(self.tz, other.tz):
raise ValueError('Passed item and index have different timezone')
return self._simple_new(result, name=name, freq=None, tz=self.tz)
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index 150410e404305..7e126dd56775b 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -2074,6 +2074,45 @@ def test_concat_order(self):
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
+ def test_concat_datetime_timezone(self):
+ # GH 18523
+ idx1 = pd.date_range('2011-01-01', periods=3, freq='H',
+ tz='Europe/Paris')
+ idx2 = pd.date_range(start=idx1[0], end=idx1[-1], freq='H')
+ df1 = pd.DataFrame({'a': [1, 2, 3]}, index=idx1)
+ df2 = pd.DataFrame({'b': [1, 2, 3]}, index=idx2)
+ result = pd.concat([df1, df2], axis=1)
+
+ exp_idx = DatetimeIndex(['2011-01-01 00:00:00+01:00',
+ '2011-01-01 01:00:00+01:00',
+ '2011-01-01 02:00:00+01:00'],
+ freq='H'
+ ).tz_localize('UTC').tz_convert('Europe/Paris')
+
+ expected = pd.DataFrame([[1, 1], [2, 2], [3, 3]],
+ index=exp_idx, columns=['a', 'b'])
+
+ tm.assert_frame_equal(result, expected)
+
+ idx3 = pd.date_range('2011-01-01', periods=3,
+ freq='H', tz='Asia/Tokyo')
+ df3 = pd.DataFrame({'b': [1, 2, 3]}, index=idx3)
+ result = pd.concat([df1, df3], axis=1)
+
+ exp_idx = DatetimeIndex(['2010-12-31 15:00:00+00:00',
+ '2010-12-31 16:00:00+00:00',
+ '2010-12-31 17:00:00+00:00',
+ '2010-12-31 23:00:00+00:00',
+ '2011-01-01 00:00:00+00:00',
+ '2011-01-01 01:00:00+00:00']
+ ).tz_localize('UTC')
+
+ expected = pd.DataFrame([[np.nan, 1], [np.nan, 2], [np.nan, 3],
+ [1, np.nan], [2, np.nan], [3, np.nan]],
+ index=exp_idx, columns=['a', 'b'])
+
+ tm.assert_frame_equal(result, expected)
+
@pytest.mark.parametrize('pdt', [pd.Series, pd.DataFrame, pd.Panel])
@pytest.mark.parametrize('dt', np.sctypes['float'])
| closes #18523
superseded #18596 | https://api.github.com/repos/pandas-dev/pandas/pulls/19281 | 2018-01-17T11:39:19Z | 2018-01-18T00:14:03Z | 2018-01-18T00:14:03Z | 2018-01-18T00:14:10Z |
DOC: add missing period to DataFrame docstring | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 2c05eefa5706e..35cc7a2a34acb 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -250,7 +250,7 @@ class DataFrame(NDFrame):
""" Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
- container for Series objects. The primary pandas data structure
+ container for Series objects. The primary pandas data structure.
Parameters
----------
| - [x] closes #19141
Went ahead and did this one manually. Working on a regex that will apply this to all docstrings in general and will make a new PR when it's ready.
| https://api.github.com/repos/pandas-dev/pandas/pulls/19280 | 2018-01-17T07:35:25Z | 2018-01-17T11:46:45Z | 2018-01-17T11:46:45Z | 2018-01-17T11:46:47Z |
remove unused block attribute | diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 43fdd454250a5..bc75a110354c0 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -97,7 +97,6 @@ class Block(PandasObject):
is_sparse = False
_box_to_block_values = True
_can_hold_na = False
- _downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
@@ -1841,7 +1840,6 @@ def equals(self, other):
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
- _downcast_dtype = 'int64'
def _can_hold_element(self, element):
tipo = maybe_infer_dtype_type(element)
| grepping across the code, `_downcast_dtype` doesn't show up anywhere else.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19279 | 2018-01-17T07:33:09Z | 2018-01-18T00:42:04Z | 2018-01-18T00:42:04Z | 2018-02-11T21:59:04Z |
Remove timeop | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 853d5cee11cd1..ca5385ee4f857 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -310,6 +310,7 @@ Other API Changes
- :func:`Series.to_csv` now accepts a ``compression`` argument that works in the same way as the ``compression`` argument in :func:`DataFrame.to_csv` (:issue:`18958`)
- Addition or subtraction of ``NaT`` from :class:`TimedeltaIndex` will return ``TimedeltaIndex`` instead of ``DatetimeIndex`` (:issue:`19124`)
- :func:`DatetimeIndex.shift` and :func:`TimedeltaIndex.shift` will now raise ``NullFrequencyError`` (which subclasses ``ValueError``, which was raised in older versions) when the index object frequency is ``None`` (:issue:`19147`)
+- Addition and subtraction of ``NaN`` from a :class:`Series` with ``dtype='timedelta64[ns]'`` will raise a ``TypeError` instead of treating the ``NaN`` as ``NaT`` (:issue:`19274`)
.. _whatsnew_0230.deprecations:
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 866329b16c830..3e671731be348 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -393,7 +393,7 @@ def _evaluate_with_timedelta_like(self, other, op, opstr, reversed=False):
if opstr in ['__floordiv__']:
result = left // right
else:
- result = op(left, float(right))
+ result = op(left, np.float64(right))
result = self._maybe_mask_results(result, convert='float64')
return Index(result, name=self.name, copy=False)
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index fc3ea106252db..fc04d9d291bf9 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -6,13 +6,12 @@
# necessary to enforce truediv in Python 2.X
from __future__ import division
import operator
-import warnings
+
import numpy as np
import pandas as pd
-import datetime
from pandas._libs import (lib, index as libindex,
- tslib as libts, algos as libalgos, iNaT)
+ algos as libalgos)
from pandas import compat
from pandas.util._decorators import Appender
@@ -20,7 +19,7 @@
from pandas.compat import bind_method
import pandas.core.missing as missing
-from pandas.errors import PerformanceWarning, NullFrequencyError
+from pandas.errors import NullFrequencyError
from pandas.core.common import _values_from_object, _maybe_match_name
from pandas.core.dtypes.missing import notna, isna
from pandas.core.dtypes.common import (
@@ -28,9 +27,9 @@
is_datetimelike_v_numeric,
is_integer_dtype, is_categorical_dtype,
is_object_dtype, is_timedelta64_dtype,
- is_datetime64_dtype, is_datetime64tz_dtype, is_datetime64_ns_dtype,
- is_bool_dtype, is_datetimetz,
- is_list_like, is_offsetlike,
+ is_datetime64_dtype, is_datetime64tz_dtype,
+ is_bool_dtype,
+ is_list_like,
is_scalar,
_ensure_object)
from pandas.core.dtypes.cast import (
@@ -39,7 +38,7 @@
from pandas.core.dtypes.generic import (
ABCSeries,
ABCDataFrame,
- ABCIndex, ABCDatetimeIndex,
+ ABCIndex,
ABCPeriodIndex)
# -----------------------------------------------------------------------------
@@ -294,287 +293,6 @@ def add_flex_arithmetic_methods(cls, flex_arith_method,
exclude=exclude)
-class _Op(object):
-
- """
- Wrapper around Series arithmetic operations.
- Generally, you should use classmethod ``_Op.get_op`` as an entry point.
-
- This validates and coerces lhs and rhs depending on its dtype and
- based on op. See _TimeOp also.
-
- Parameters
- ----------
- left : Series
- lhs of op
- right : object
- rhs of op
- name : str
- name of op
- na_op : callable
- a function which wraps op
- """
-
- fill_value = np.nan
- wrap_results = staticmethod(lambda x: x)
- dtype = None
-
- def __init__(self, left, right, name, na_op):
- self.left = left
- self.right = right
-
- self.name = name
- self.na_op = na_op
-
- self.lvalues = left
- self.rvalues = right
-
- @classmethod
- def get_op(cls, left, right, name, na_op):
- """
- Get op dispatcher, returns _Op or _TimeOp.
-
- If ``left`` and ``right`` are appropriate for datetime arithmetic with
- operation ``name``, processes them and returns a ``_TimeOp`` object
- that stores all the required values. Otherwise, it will generate
- either a ``_Op``, indicating that the operation is performed via
- normal numpy path.
- """
- is_timedelta_lhs = is_timedelta64_dtype(left)
-
- if not is_timedelta_lhs:
- return _Op(left, right, name, na_op)
- else:
- return _TimeOp(left, right, name, na_op)
-
-
-class _TimeOp(_Op):
- """
- Wrapper around Series datetime/time/timedelta arithmetic operations.
- Generally, you should use classmethod ``_Op.get_op`` as an entry point.
- """
- fill_value = iNaT
-
- def __init__(self, left, right, name, na_op):
- super(_TimeOp, self).__init__(left, right, name, na_op)
-
- lvalues = self._convert_to_array(left, name=name)
- rvalues = self._convert_to_array(right, name=name, other=lvalues)
-
- # left
- self.is_timedelta_lhs = is_timedelta64_dtype(lvalues)
- assert self.is_timedelta_lhs
-
- # right
- self.is_offset_rhs = is_offsetlike(right)
- self.is_datetime64_rhs = is_datetime64_dtype(rvalues)
- self.is_datetime64tz_rhs = is_datetime64tz_dtype(rvalues)
- self.is_datetime_rhs = (self.is_datetime64_rhs or
- self.is_datetime64tz_rhs)
- self.is_timedelta_rhs = is_timedelta64_dtype(rvalues)
- self.is_integer_rhs = rvalues.dtype.kind in ('i', 'u')
- self.is_floating_rhs = rvalues.dtype.kind == 'f'
-
- self._validate(lvalues, rvalues, name)
- self.lvalues, self.rvalues = self._convert_for_datetime(lvalues,
- rvalues)
-
- def _validate_timedelta(self, name):
- # assumes self.is_timedelta_lhs
-
- if self.is_integer_rhs or self.is_floating_rhs:
- # timedelta and integer mul/div
- self._check_timedelta_with_numeric(name)
- elif self.is_timedelta_rhs or self.is_offset_rhs:
- # 2 timedeltas
- if name not in ('__div__', '__rdiv__', '__truediv__',
- '__rtruediv__', '__add__', '__radd__', '__sub__',
- '__rsub__', '__floordiv__', '__rfloordiv__'):
- raise TypeError("can only operate on a timedeltas for addition"
- ", subtraction, and division, but the operator"
- " [{name}] was passed".format(name=name))
- elif self.is_datetime_rhs:
- if name not in ('__add__', '__radd__', '__rsub__'):
- raise TypeError("can only operate on a timedelta/DateOffset "
- "with a rhs of a datetime for addition, "
- "but the operator [{name}] was passed"
- .format(name=name))
- else:
- raise TypeError('cannot operate on a series without a rhs '
- 'of a series/ndarray of type datetime64[ns] '
- 'or a timedelta')
-
- def _validate(self, lvalues, rvalues, name):
- return self._validate_timedelta(name)
-
- def _check_timedelta_with_numeric(self, name):
- if name not in ('__div__', '__truediv__', '__mul__', '__rmul__'):
- raise TypeError("can only operate on a timedelta and an "
- "integer or a float for division and "
- "multiplication, but the operator [{name}] "
- "was passed".format(name=name))
-
- def _convert_to_array(self, values, name=None, other=None):
- """converts values to ndarray"""
- from pandas.core.tools.timedeltas import to_timedelta
-
- ovalues = values
- supplied_dtype = None
- if not is_list_like(values):
- values = np.array([values])
-
- # if this is a Series that contains relevant dtype info, then use this
- # instead of the inferred type; this avoids coercing Series([NaT],
- # dtype='datetime64[ns]') to Series([NaT], dtype='timedelta64[ns]')
- elif (isinstance(values, (pd.Series, ABCDatetimeIndex)) and
- (is_timedelta64_dtype(values) or is_datetime64_dtype(values))):
- supplied_dtype = values.dtype
-
- inferred_type = lib.infer_dtype(values)
- if (inferred_type in ('datetime64', 'datetime', 'date', 'time') or
- is_datetimetz(inferred_type)):
- # if we have a other of timedelta, but use pd.NaT here we
- # we are in the wrong path
- if (supplied_dtype is None and other is not None and
- (other.dtype in ('timedelta64[ns]', 'datetime64[ns]')) and
- isna(values).all()):
- values = np.empty(values.shape, dtype='timedelta64[ns]')
- values[:] = iNaT
-
- elif isinstance(values, ABCDatetimeIndex):
- # a datelike
- pass
- elif isinstance(ovalues, datetime.datetime):
- # datetime scalar
- values = pd.DatetimeIndex(values)
- # datetime array with tz
- elif is_datetimetz(values):
- if isinstance(values, ABCSeries):
- values = values._values
- elif not (isinstance(values, (np.ndarray, ABCSeries)) and
- is_datetime64_dtype(values)):
- values = libts.array_to_datetime(values)
- elif (is_datetime64_dtype(values) and
- not is_datetime64_ns_dtype(values)):
- # GH#7996 e.g. np.datetime64('2013-01-01') is datetime64[D]
- values = values.astype('datetime64[ns]')
-
- elif inferred_type in ('timedelta', 'timedelta64'):
- # have a timedelta, convert to to ns here
- values = to_timedelta(values, errors='coerce', box=False)
- if isinstance(other, ABCDatetimeIndex):
- # GH#13905
- # Defer to DatetimeIndex/TimedeltaIndex operations where
- # timezones are handled carefully.
- values = pd.TimedeltaIndex(values)
- elif inferred_type == 'integer':
- # py3 compat where dtype is 'm' but is an integer
- if values.dtype.kind == 'm':
- values = values.astype('timedelta64[ns]')
- elif isinstance(values, pd.PeriodIndex):
- values = values.to_timestamp().to_series()
- elif name not in ('__truediv__', '__div__', '__mul__', '__rmul__'):
- raise TypeError("incompatible type for a datetime/timedelta "
- "operation [{name}]".format(name=name))
- elif inferred_type == 'floating':
- if (isna(values).all() and
- name in ('__add__', '__radd__', '__sub__', '__rsub__')):
- values = np.empty(values.shape, dtype=other.dtype)
- values[:] = iNaT
- return values
- elif is_offsetlike(values):
- return values
- else:
- raise TypeError("incompatible type [{dtype}] for a "
- "datetime/timedelta operation"
- .format(dtype=np.array(values).dtype))
-
- return values
-
- def _convert_for_datetime(self, lvalues, rvalues):
- from pandas.core.tools.timedeltas import to_timedelta
-
- mask = isna(lvalues) | isna(rvalues)
-
- # datetimes require views
- if self.is_datetime_rhs:
-
- # datetime subtraction means timedelta
- if self.is_datetime64tz_rhs:
- self.dtype = rvalues.dtype
- else:
- self.dtype = 'datetime64[ns]'
-
- # if adding single offset try vectorized path
- # in DatetimeIndex; otherwise elementwise apply
- def _offset(lvalues, rvalues):
- if len(lvalues) == 1:
- rvalues = pd.DatetimeIndex(rvalues)
- lvalues = lvalues[0]
- else:
- warnings.warn("Adding/subtracting array of DateOffsets to "
- "Series not vectorized", PerformanceWarning)
- rvalues = rvalues.astype('O')
-
- # pass thru on the na_op
- self.na_op = lambda x, y: getattr(x, self.name)(y)
- return lvalues, rvalues
-
- if self.is_offset_rhs:
- rvalues, lvalues = _offset(rvalues, lvalues)
- else:
-
- # with tz, convert to UTC
- if self.is_datetime64tz_rhs:
- rvalues = rvalues.tz_convert('UTC').tz_localize(None)
-
- lvalues = lvalues.view(np.int64)
- rvalues = rvalues.view(np.int64)
-
- # otherwise it's a timedelta
- else:
-
- self.dtype = 'timedelta64[ns]'
-
- # convert Tick DateOffset to underlying delta
- if self.is_offset_rhs:
- rvalues = to_timedelta(rvalues, box=False)
-
- lvalues = lvalues.astype(np.int64)
- if not self.is_floating_rhs:
- rvalues = rvalues.astype(np.int64)
-
- # time delta division -> unit less
- # integer gets converted to timedelta in np < 1.6
- if ((self.is_timedelta_lhs and self.is_timedelta_rhs) and
- not self.is_integer_rhs and
- self.name in ('__div__', '__rdiv__',
- '__truediv__', '__rtruediv__',
- '__floordiv__', '__rfloordiv__')):
- self.dtype = 'float64'
- self.fill_value = np.nan
- lvalues = lvalues.astype(np.float64)
- rvalues = rvalues.astype(np.float64)
-
- # if we need to mask the results
- if mask.any():
-
- def f(x):
-
- # datetime64[ns]/timedelta64[ns] masking
- try:
- x = np.array(x, dtype=self.dtype)
- except TypeError:
- x = np.array(x, dtype='datetime64[ns]')
-
- np.putmask(x, mask, self.fill_value)
- return x
-
- self.wrap_results = f
-
- return lvalues, rvalues
-
-
def _align_method_SERIES(left, right, align_asobject=False):
""" align lhs and rhs Series """
@@ -678,26 +396,22 @@ def wrapper(left, right, name=name, na_op=na_op):
index=left.index, name=res_name,
dtype=result.dtype)
- converted = _Op.get_op(left, right, name, na_op)
-
- lvalues, rvalues = converted.lvalues, converted.rvalues
- dtype = converted.dtype
- wrap_results = converted.wrap_results
- na_op = converted.na_op
+ elif is_timedelta64_dtype(left):
+ result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex)
+ res_name = _get_series_op_result_name(left, right)
+ return construct_result(left, result,
+ index=left.index, name=res_name,
+ dtype=result.dtype)
+ lvalues = left.values
+ rvalues = right
if isinstance(rvalues, ABCSeries):
- lvalues = getattr(lvalues, 'values', lvalues)
rvalues = getattr(rvalues, 'values', rvalues)
- # _Op aligns left and right
- else:
- if (hasattr(lvalues, 'values') and
- not isinstance(lvalues, ABCDatetimeIndex)):
- lvalues = lvalues.values
- result = wrap_results(safe_na_op(lvalues, rvalues))
+ result = safe_na_op(lvalues, rvalues)
res_name = _get_series_op_result_name(left, right)
return construct_result(left, result,
- index=left.index, name=res_name, dtype=dtype)
+ index=left.index, name=res_name, dtype=None)
return wrapper
diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py
index 962de91ed0581..44f48f3ea9833 100644
--- a/pandas/tests/indexes/timedeltas/test_arithmetic.py
+++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py
@@ -652,14 +652,14 @@ def test_timedelta_ops_with_missing_values(self):
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
- actual = s1 + NA
- tm.assert_series_equal(actual, sn)
- actual = NA + s1
- tm.assert_series_equal(actual, sn)
- actual = s1 - NA
- tm.assert_series_equal(actual, sn)
- actual = -NA + s1
- tm.assert_series_equal(actual, sn)
+ with pytest.raises(TypeError):
+ s1 + np.nan
+ with pytest.raises(TypeError):
+ np.nan + s1
+ with pytest.raises(TypeError):
+ s1 - np.nan
+ with pytest.raises(TypeError):
+ -np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index c06435d4b8c42..7505e6b0cec3b 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -1108,7 +1108,7 @@ def test_operators_timedelta64_with_timedelta_invalid(self, scalar_td):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
- pattern = 'operate|unsupported|cannot'
+ pattern = 'operate|unsupported|cannot|not supported'
with tm.assert_raises_regex(TypeError, pattern):
td1 * scalar_td
with tm.assert_raises_regex(TypeError, pattern):
| We're finally able to remove `ops._Op` and `ops._TimeOp`. This PR makes one final compatibility fix:
ATM:
```
tdi = pd.TimedeltaIndex(['1 Day'])
ser = pd.Series(tdi)
>>> tdi + np.nan
TypeError: unsupported operand type(s) for +: 'TimedeltaIndex' and 'float'
>>> ser + np.nan
0 NaT
dtype: timedelta64[ns]
```
Same deal for subtracting np.nan. See #19274. This makes the TimedeltaIndex canonical and updates the appropriate Series tests.
- [x] closes #19274
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19277 | 2018-01-17T06:39:10Z | 2018-01-19T11:32:36Z | 2018-01-19T11:32:36Z | 2018-01-19T16:06:02Z |
Fix tzawareness_compat for DatetimeIndex comparisons with NaT | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 853d5cee11cd1..e8923a4c0bc20 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -431,7 +431,7 @@ Conversion
-
- Bug in ``.astype()`` to non-ns timedelta units would hold the incorrect dtype (:issue:`19176`, :issue:`19223`, :issue:`12425`)
- Bug in subtracting :class:`Series` from ``NaT`` incorrectly returning ``NaT`` (:issue:`19158`)
-
+- Bug in comparison of timezone-aware :class:`DatetimeIndex` against ``NaT`` incorrectly raising ``TypeError`` (:issue:`19276`)
Indexing
^^^^^^^^
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index d83d2d2c93ec8..978674b9d2a8d 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -666,7 +666,10 @@ def _assert_tzawareness_compat(self, other):
if is_datetime64tz_dtype(other):
# Get tzinfo from Series dtype
other_tz = other.dtype.tz
- if self.tz is None:
+ if other is libts.NaT:
+ # pd.NaT quacks both aware and naive
+ pass
+ elif self.tz is None:
if other_tz is not None:
raise TypeError('Cannot compare tz-naive and tz-aware '
'datetime-like objects.')
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index 41cd654cf22b9..e3ebb8769db02 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -286,6 +286,21 @@ def test_comparison_tzawareness_compat(self, op):
with pytest.raises(TypeError):
op(dz, ts)
+ @pytest.mark.parametrize('op', [operator.eq, operator.ne,
+ operator.gt, operator.ge,
+ operator.lt, operator.le])
+ def test_nat_comparison_tzawareness(self, op):
+ # GH#19276
+ # tzaware DatetimeIndex should not raise when compared to NaT
+ dti = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
+ '2014-05-01', '2014-07-01'])
+ expected = np.array([op == operator.ne] * len(dti))
+ result = op(dti, pd.NaT)
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = op(dti.tz_localize('US/Pacific'), pd.NaT)
+ tm.assert_numpy_array_equal(result, expected)
+
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
| ATM `pd.date_range('2016-01-01', periods=1, tz='US/Pacific') < pd.NaT` raises `TypeError` because `NaT` is tz-naive. This fixes that.
- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19276 | 2018-01-17T06:30:29Z | 2018-01-18T00:32:46Z | 2018-01-18T00:32:46Z | 2018-02-11T21:59:05Z |
TST: Clean up DataFrame.to_csv compression tests | diff --git a/pandas/tests/conftest.py b/pandas/tests/conftest.py
new file mode 100644
index 0000000000000..8f5d963927f60
--- /dev/null
+++ b/pandas/tests/conftest.py
@@ -0,0 +1,11 @@
+import pytest
+import pandas.util._test_decorators as td
+
+
+@pytest.fixture(params=[None, 'gzip', 'bz2',
+ pytest.param('xz', marks=td.skip_if_no_lzma)])
+def compression(request):
+ """
+ Fixture for trying common compression types in compression tests
+ """
+ return request.param
diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py
index 0ca25735fc03f..3fd07869c4159 100644
--- a/pandas/tests/frame/test_to_csv.py
+++ b/pandas/tests/frame/test_to_csv.py
@@ -21,7 +21,6 @@
ensure_clean,
makeCustomDataframe as mkdf)
import pandas.util.testing as tm
-import pandas.util._test_decorators as td
from pandas.tests.frame.common import TestData
@@ -920,73 +919,28 @@ def test_to_csv_path_is_none(self):
recons = pd.read_csv(StringIO(csv_str), index_col=0)
assert_frame_equal(self.frame, recons)
- def test_to_csv_compression_gzip(self):
- # GH7615
- # use the compression kw in to_csv
- df = DataFrame([[0.123456, 0.234567, 0.567567],
- [12.32112, 123123.2, 321321.2]],
- index=['A', 'B'], columns=['X', 'Y', 'Z'])
-
- with ensure_clean() as filename:
-
- df.to_csv(filename, compression="gzip")
-
- # test the round trip - to_csv -> read_csv
- rs = read_csv(filename, compression="gzip", index_col=0)
- assert_frame_equal(df, rs)
-
- # explicitly make sure file is gziped
- import gzip
- f = gzip.open(filename, 'rb')
- text = f.read().decode('utf8')
- f.close()
- for col in df.columns:
- assert col in text
+ def test_to_csv_compression(self, compression):
- def test_to_csv_compression_bz2(self):
- # GH7615
- # use the compression kw in to_csv
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
- df.to_csv(filename, compression="bz2")
+ df.to_csv(filename, compression=compression)
# test the round trip - to_csv -> read_csv
- rs = read_csv(filename, compression="bz2", index_col=0)
+ rs = read_csv(filename, compression=compression, index_col=0)
assert_frame_equal(df, rs)
- # explicitly make sure file is bz2ed
- import bz2
- f = bz2.BZ2File(filename, 'rb')
- text = f.read().decode('utf8')
- f.close()
- for col in df.columns:
- assert col in text
-
- @td.skip_if_no_lzma
- def test_to_csv_compression_xz(self):
- # GH11852
- # use the compression kw in to_csv
- df = DataFrame([[0.123456, 0.234567, 0.567567],
- [12.32112, 123123.2, 321321.2]],
- index=['A', 'B'], columns=['X', 'Y', 'Z'])
-
- with ensure_clean() as filename:
-
- df.to_csv(filename, compression="xz")
-
- # test the round trip - to_csv -> read_csv
- rs = read_csv(filename, compression="xz", index_col=0)
- assert_frame_equal(df, rs)
+ # explicitly make sure file is compressed
+ with tm.decompress_file(filename, compression) as fh:
+ text = fh.read().decode('utf8')
+ for col in df.columns:
+ assert col in text
- # explicitly make sure file is xzipped
- lzma = compat.import_lzma()
- f = lzma.open(filename, 'rb')
- assert_frame_equal(df, read_csv(f, index_col=0))
- f.close()
+ with tm.decompress_file(filename, compression) as fh:
+ assert_frame_equal(df, read_csv(fh, index_col=0))
def test_to_csv_compression_value_error(self):
# GH7615
diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py
index 2d0a23d71a2e6..10f6cef04b593 100644
--- a/pandas/tests/io/parser/test_network.py
+++ b/pandas/tests/io/parser/test_network.py
@@ -15,15 +15,17 @@
@pytest.mark.network
@pytest.mark.parametrize(
- "compression,extension", [
+ "compress_type, extension", [
('gzip', '.gz'), ('bz2', '.bz2'), ('zip', '.zip'),
pytest.param('xz', '.xz', marks=td.skip_if_no_lzma)
]
)
@pytest.mark.parametrize('mode', ['explicit', 'infer'])
@pytest.mark.parametrize('engine', ['python', 'c'])
-def test_compressed_urls(salaries_table, compression, extension, mode, engine):
- check_compressed_urls(salaries_table, compression, extension, mode, engine)
+def test_compressed_urls(salaries_table, compress_type, extension, mode,
+ engine):
+ check_compressed_urls(salaries_table, compress_type, extension, mode,
+ engine)
@tm.network
diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py
index 99dcc9272bf11..ec26716f79446 100644
--- a/pandas/tests/series/test_io.py
+++ b/pandas/tests/series/test_io.py
@@ -14,7 +14,6 @@
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, ensure_clean)
import pandas.util.testing as tm
-import pandas.util._test_decorators as td
from .common import TestData
@@ -139,12 +138,6 @@ def test_to_csv_path_is_none(self):
csv_str = s.to_csv(path=None)
assert isinstance(csv_str, str)
- @pytest.mark.parametrize('compression', [
- None,
- 'gzip',
- 'bz2',
- pytest.param('xz', marks=td.skip_if_no_lzma),
- ])
def test_to_csv_compression(self, compression):
s = Series([0.123456, 0.234567, 0.567567], index=['A', 'B', 'C'],
@@ -160,14 +153,13 @@ def test_to_csv_compression(self, compression):
assert_series_equal(s, rs)
# explicitly ensure file was compressed
- f = tm.decompress_file(filename, compression=compression)
- text = f.read().decode('utf8')
- assert s.name in text
- f.close()
-
- f = tm.decompress_file(filename, compression=compression)
- assert_series_equal(s, pd.read_csv(f, index_col=0, squeeze=True))
- f.close()
+ with tm.decompress_file(filename, compression=compression) as fh:
+ text = fh.read().decode('utf8')
+ assert s.name in text
+
+ with tm.decompress_file(filename, compression=compression) as fh:
+ assert_series_equal(s, pd.read_csv(fh,
+ index_col=0, squeeze=True))
class TestSeriesIO(TestData):
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 1bea25a16ca1e..3567754371da3 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -162,6 +162,7 @@ def round_trip_localpath(writer, reader, path=None):
return obj
+@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object
@@ -194,7 +195,7 @@ def decompress_file(path, compression):
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
- return f
+ yield f
def assert_almost_equal(left, right, check_exact=False,
| xref #19226
Parametrized some of the compression tests in ``tests/frame/test_to_csv.py`` and used the new ``decompress_file`` function. | https://api.github.com/repos/pandas-dev/pandas/pulls/19273 | 2018-01-16T19:18:48Z | 2018-01-21T15:29:33Z | 2018-01-21T15:29:33Z | 2018-01-22T13:58:11Z |
REF: Move pandas.core.categorical | diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index cf63b5083885e..75cf0a88e37c1 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -55,7 +55,7 @@ from pandas.core.dtypes.common import (
is_bool_dtype, is_object_dtype,
is_datetime64_dtype,
pandas_dtype)
-from pandas.core.categorical import Categorical
+from pandas.core.arrays import Categorical
from pandas.core.dtypes.concat import union_categoricals
import pandas.io.common as com
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index 07b34961ce25d..f651fbbf56316 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -108,7 +108,11 @@ def load_reduce(self):
('pandas.tseries.index', 'DatetimeIndex'):
('pandas.core.indexes.datetimes', 'DatetimeIndex'),
('pandas.tseries.period', 'PeriodIndex'):
- ('pandas.core.indexes.period', 'PeriodIndex')
+ ('pandas.core.indexes.period', 'PeriodIndex'),
+
+ # 19269, arrays moving
+ ('pandas.core.categorical', 'Categorical'):
+ ('pandas.core.arrays', 'Categorical'),
}
diff --git a/pandas/core/api.py b/pandas/core/api.py
index b228a97c99074..aa37ddffa1156 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -6,7 +6,7 @@
from pandas.core.algorithms import factorize, unique, value_counts
from pandas.core.dtypes.missing import isna, isnull, notna, notnull
-from pandas.core.categorical import Categorical
+from pandas.core.arrays import Categorical
from pandas.core.groupby import Grouper
from pandas.io.formats.format import set_eng_float_format
from pandas.core.index import (Index, CategoricalIndex, Int64Index,
diff --git a/pandas/core/arrays/__init__.py b/pandas/core/arrays/__init__.py
new file mode 100644
index 0000000000000..ee32b12f0e712
--- /dev/null
+++ b/pandas/core/arrays/__init__.py
@@ -0,0 +1 @@
+from .categorical import Categorical # noqa
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
new file mode 100644
index 0000000000000..708f903cd73cb
--- /dev/null
+++ b/pandas/core/arrays/categorical.py
@@ -0,0 +1,2331 @@
+# pylint: disable=E1101,W0232
+
+import numpy as np
+from warnings import warn
+import types
+
+from pandas import compat
+from pandas.compat import u, lzip
+from pandas._libs import lib, algos as libalgos
+
+from pandas.core.dtypes.generic import (
+ ABCSeries, ABCIndexClass, ABCCategoricalIndex)
+from pandas.core.dtypes.missing import isna, notna
+from pandas.core.dtypes.cast import (
+ maybe_infer_to_datetimelike,
+ coerce_indexer_dtype)
+from pandas.core.dtypes.dtypes import CategoricalDtype
+from pandas.core.dtypes.common import (
+ _ensure_int64,
+ _ensure_object,
+ _ensure_platform_int,
+ is_dtype_equal,
+ is_datetimelike,
+ is_datetime64_dtype,
+ is_timedelta64_dtype,
+ is_categorical,
+ is_categorical_dtype,
+ is_list_like, is_sequence,
+ is_scalar,
+ is_dict_like)
+from pandas.core.common import is_null_slice, _maybe_box_datetimelike
+
+from pandas.core.algorithms import factorize, take_1d, unique1d
+from pandas.core.accessor import PandasDelegate
+from pandas.core.base import (PandasObject,
+ NoNewAttributesMixin, _shared_docs)
+import pandas.core.common as com
+from pandas.core.missing import interpolate_2d
+from pandas.compat.numpy import function as nv
+from pandas.util._decorators import (
+ Appender, cache_readonly, deprecate_kwarg, Substitution)
+
+from pandas.io.formats.terminal import get_terminal_size
+from pandas.util._validators import validate_bool_kwarg
+from pandas.core.config import get_option
+
+
+def _cat_compare_op(op):
+ def f(self, other):
+ # On python2, you can usually compare any type to any type, and
+ # Categoricals can be seen as a custom type, but having different
+ # results depending whether categories are the same or not is kind of
+ # insane, so be a bit stricter here and use the python3 idea of
+ # comparing only things of equal type.
+ if not self.ordered:
+ if op in ['__lt__', '__gt__', '__le__', '__ge__']:
+ raise TypeError("Unordered Categoricals can only compare "
+ "equality or not")
+ if isinstance(other, Categorical):
+ # Two Categoricals can only be be compared if the categories are
+ # the same (maybe up to ordering, depending on ordered)
+
+ msg = ("Categoricals can only be compared if "
+ "'categories' are the same.")
+ if len(self.categories) != len(other.categories):
+ raise TypeError(msg + " Categories are different lengths")
+ elif (self.ordered and not (self.categories ==
+ other.categories).all()):
+ raise TypeError(msg)
+ elif not set(self.categories) == set(other.categories):
+ raise TypeError(msg)
+
+ if not (self.ordered == other.ordered):
+ raise TypeError("Categoricals can only be compared if "
+ "'ordered' is the same")
+ if not self.ordered and not self.categories.equals(
+ other.categories):
+ # both unordered and different order
+ other_codes = _get_codes_for_values(other, self.categories)
+ else:
+ other_codes = other._codes
+
+ na_mask = (self._codes == -1) | (other_codes == -1)
+ f = getattr(self._codes, op)
+ ret = f(other_codes)
+ if na_mask.any():
+ # In other series, the leads to False, so do that here too
+ ret[na_mask] = False
+ return ret
+
+ # Numpy-1.9 and earlier may convert a scalar to a zerodim array during
+ # comparison operation when second arg has higher priority, e.g.
+ #
+ # cat[0] < cat
+ #
+ # With cat[0], for example, being ``np.int64(1)`` by the time it gets
+ # into this function would become ``np.array(1)``.
+ other = lib.item_from_zerodim(other)
+ if is_scalar(other):
+ if other in self.categories:
+ i = self.categories.get_loc(other)
+ return getattr(self._codes, op)(i)
+ else:
+ if op == '__eq__':
+ return np.repeat(False, len(self))
+ elif op == '__ne__':
+ return np.repeat(True, len(self))
+ else:
+ msg = ("Cannot compare a Categorical for op {op} with a "
+ "scalar, which is not a category.")
+ raise TypeError(msg.format(op=op))
+ else:
+
+ # allow categorical vs object dtype array comparisons for equality
+ # these are only positional comparisons
+ if op in ['__eq__', '__ne__']:
+ return getattr(np.array(self), op)(np.array(other))
+
+ msg = ("Cannot compare a Categorical for op {op} with type {typ}."
+ "\nIf you want to compare values, use 'np.asarray(cat) "
+ "<op> other'.")
+ raise TypeError(msg.format(op=op, typ=type(other)))
+
+ f.__name__ = op
+
+ return f
+
+
+def _maybe_to_categorical(array):
+ """
+ Coerce to a categorical if a series is given.
+
+ Internal use ONLY.
+ """
+ if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
+ return array._values
+ elif isinstance(array, np.ndarray):
+ return Categorical(array)
+ return array
+
+
+_codes_doc = """The category codes of this categorical.
+
+Level codes are an array if integer which are the positions of the real
+values in the categories array.
+
+There is not setter, use the other categorical methods and the normal item
+setter to change values in the categorical.
+"""
+
+
+class Categorical(PandasObject):
+ """
+ Represents a categorical variable in classic R / S-plus fashion
+
+ `Categoricals` can only take on only a limited, and usually fixed, number
+ of possible values (`categories`). In contrast to statistical categorical
+ variables, a `Categorical` might have an order, but numerical operations
+ (additions, divisions, ...) are not possible.
+
+ All values of the `Categorical` are either in `categories` or `np.nan`.
+ Assigning values outside of `categories` will raise a `ValueError`. Order
+ is defined by the order of the `categories`, not lexical order of the
+ values.
+
+ Parameters
+ ----------
+ values : list-like
+ The values of the categorical. If categories are given, values not in
+ categories will be replaced with NaN.
+ categories : Index-like (unique), optional
+ The unique categories for this categorical. If not given, the
+ categories are assumed to be the unique values of values.
+ ordered : boolean, (default False)
+ Whether or not this categorical is treated as a ordered categorical.
+ If not given, the resulting categorical will not be ordered.
+ dtype : CategoricalDtype
+ An instance of ``CategoricalDtype`` to use for this categorical
+
+ .. versionadded:: 0.21.0
+
+ Attributes
+ ----------
+ categories : Index
+ The categories of this categorical
+ codes : ndarray
+ The codes (integer positions, which point to the categories) of this
+ categorical, read only.
+ ordered : boolean
+ Whether or not this Categorical is ordered.
+ dtype : CategoricalDtype
+ The instance of ``CategoricalDtype`` storing the ``categories``
+ and ``ordered``.
+
+ .. versionadded:: 0.21.0
+
+ Methods
+ -------
+ from_codes
+ __array__
+
+ Raises
+ ------
+ ValueError
+ If the categories do not validate.
+ TypeError
+ If an explicit ``ordered=True`` is given but no `categories` and the
+ `values` are not sortable.
+
+ Examples
+ --------
+ >>> pd.Categorical([1, 2, 3, 1, 2, 3])
+ [1, 2, 3, 1, 2, 3]
+ Categories (3, int64): [1, 2, 3]
+
+ >>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
+ [a, b, c, a, b, c]
+ Categories (3, object): [a, b, c]
+
+ Ordered `Categoricals` can be sorted according to the custom order
+ of the categories and can have a min and max value.
+
+ >>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
+ ... categories=['c', 'b', 'a'])
+ >>> c
+ [a, b, c, a, b, c]
+ Categories (3, object): [c < b < a]
+ >>> c.min()
+ 'c'
+
+ Notes
+ -----
+ See the `user guide
+ <http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
+
+ See also
+ --------
+ pandas.api.types.CategoricalDtype : Type for categorical data
+ CategoricalIndex : An Index with an underlying ``Categorical``
+ """
+
+ # For comparisons, so that numpy uses our implementation if the compare
+ # ops, which raise
+ __array_priority__ = 1000
+ _dtype = CategoricalDtype()
+ _deprecations = frozenset(['labels'])
+ _typ = 'categorical'
+
    def __init__(self, values, categories=None, ordered=None, dtype=None,
                 fastpath=False):
        # Resolve the target CategoricalDtype first, then sanitize and
        # encode ``values`` into integer codes against it.
        #
        # Ways of specifying the dtype (prioritized ordered)
        # 1. dtype is a CategoricalDtype
        #    a.) with known categories, use dtype.categories
        #    b.) else with Categorical values, use values.dtype
        #    c.) else, infer from values
        #    d.) specifying dtype=CategoricalDtype and categories is an error
        # 2. dtype is a string 'category'
        #    a.) use categories, ordered
        #    b.) use values.dtype
        #    c.) infer from values
        # 3. dtype is None
        #    a.) use categories, ordered
        #    b.) use values.dtype
        #    c.) infer from values

        if dtype is not None:
            # The dtype argument takes precedence over values.dtype (if any)
            if isinstance(dtype, compat.string_types):
                if dtype == 'category':
                    dtype = CategoricalDtype(categories, ordered)
                else:
                    msg = "Unknown `dtype` {dtype}"
                    raise ValueError(msg.format(dtype=dtype))
            elif categories is not None or ordered is not None:
                raise ValueError("Cannot specify both `dtype` and `categories`"
                                 " or `ordered`.")

            categories = dtype.categories
            ordered = dtype.ordered

        elif is_categorical(values):
            # If no "dtype" was passed, use the one from "values", but honor
            # the "ordered" and "categories" arguments
            dtype = values.dtype._from_categorical_dtype(values.dtype,
                                                         categories, ordered)
        else:
            # If dtype=None and values is not categorical, create a new dtype
            dtype = CategoricalDtype(categories, ordered)

        # At this point, dtype is always a CategoricalDtype
        # if dtype.categories is None, we are inferring

        if fastpath:
            # Trusted path: ``values`` are assumed to already be valid codes.
            self._codes = coerce_indexer_dtype(values, categories)
            self._dtype = dtype
            return

        # null_mask indicates missing values we want to exclude from inference.
        # This means: only missing values in list-likes (not arrays/ndframes).
        null_mask = np.array(False)

        # sanitize input
        if is_categorical_dtype(values):
            if dtype.categories is None:
                dtype = CategoricalDtype(values.categories, dtype.ordered)

        elif not isinstance(values, (ABCIndexClass, ABCSeries)):
            # _sanitize_array coerces np.nan to a string under certain versions
            # of numpy
            values = maybe_infer_to_datetimelike(values, convert_dates=True)
            if not isinstance(values, np.ndarray):
                values = _convert_to_list_like(values)
                from pandas.core.series import _sanitize_array
                # By convention, empty lists result in object dtype:
                if len(values) == 0:
                    sanitize_dtype = 'object'
                else:
                    sanitize_dtype = None
                null_mask = isna(values)
                if null_mask.any():
                    # Drop the nulls before factorization; reinserted below.
                    values = [values[idx] for idx in np.where(~null_mask)[0]]
                values = _sanitize_array(values, None, dtype=sanitize_dtype)

        if dtype.categories is None:
            try:
                codes, categories = factorize(values, sort=True)
            except TypeError:
                codes, categories = factorize(values, sort=False)
                if dtype.ordered:
                    # raise, as we don't have a sortable data structure and so
                    # the user should give us one by specifying categories
                    raise TypeError("'values' is not ordered, please "
                                    "explicitly specify the categories order "
                                    "by passing in a categories argument.")
            except ValueError:

                # FIXME
                raise NotImplementedError("> 1 ndim Categorical are not "
                                          "supported at this time")

            # we're inferring from values
            dtype = CategoricalDtype(categories, dtype.ordered)

        elif is_categorical_dtype(values):
            # Re-express existing codes relative to the requested categories.
            old_codes = (values.cat.codes if isinstance(values, ABCSeries)
                         else values.codes)
            codes = _recode_for_categories(old_codes, values.dtype.categories,
                                           dtype.categories)

        else:
            codes = _get_codes_for_values(values, dtype.categories)

        if null_mask.any():
            # Reinsert -1 placeholders for previously removed missing values
            full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
            full_codes[~null_mask] = codes
            codes = full_codes

        self._dtype = dtype
        self._codes = coerce_indexer_dtype(codes, dtype.categories)
+
    @property
    def categories(self):
        """The categories of this categorical.

        Setting assigns new values to each category (effectively a rename of
        each individual category).

        The assigned value has to be a list-like object. All items must be
        unique and the number of items in the new categories must be the same
        as the number of items in the old categories.

        Assigning to `categories` is an inplace operation!

        Raises
        ------
        ValueError
            If the new categories do not validate as categories or if the
            number of new categories is unequal to the number of old
            categories

        See also
        --------
        rename_categories
        reorder_categories
        add_categories
        remove_categories
        remove_unused_categories
        set_categories
        """
        # The categories live on the dtype; this object only stores codes.
        return self.dtype.categories
+
+ @categories.setter
+ def categories(self, categories):
+ new_dtype = CategoricalDtype(categories, ordered=self.ordered)
+ if (self.dtype.categories is not None and
+ len(self.dtype.categories) != len(new_dtype.categories)):
+ raise ValueError("new categories need to have the same number of "
+ "items as the old categories!")
+ self._dtype = new_dtype
+
    @property
    def ordered(self):
        """Whether the categories have an ordered relationship"""
        # Orderedness is a property of the dtype, not of the codes.
        return self.dtype.ordered
+
    @property
    def dtype(self):
        """The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
        # Carries both the categories Index and the ordered flag.
        return self._dtype
+
    @property
    def _constructor(self):
        # Class used by internal factory methods (e.g. ``copy``) to build
        # new instances of the same kind.
        return Categorical
+
+ def copy(self):
+ """ Copy constructor. """
+ return self._constructor(values=self._codes.copy(),
+ categories=self.categories,
+ ordered=self.ordered,
+ fastpath=True)
+
    def astype(self, dtype, copy=True):
        """
        Coerce this type to another dtype

        Parameters
        ----------
        dtype : numpy dtype or pandas type
        copy : bool, default True
            By default, astype always returns a newly allocated object.
            If copy is set to False and dtype is categorical, the original
            object is returned.

            .. versionadded:: 0.19.0

        """
        if is_categorical_dtype(dtype):
            # GH 10696/18593
            # NOTE(review): _update_dtype presumably merges the requested
            # dtype with self.dtype (unspecified categories/ordered keep the
            # current values) -- confirm against CategoricalDtype.
            dtype = self.dtype._update_dtype(dtype)
            self = self.copy() if copy else self
            if dtype == self.dtype:
                return self
            return self._set_dtype(dtype)
        # Non-categorical target: materialize the values as an ndarray.
        return np.array(self, dtype=dtype, copy=copy)
+
    @cache_readonly
    def ndim(self):
        """Number of dimensions of the Categorical """
        # Delegates to the codes ndarray; codes are expected to be 1-d
        # (``shift`` raises for ndim > 1).
        return self._codes.ndim
+
    @cache_readonly
    def size(self):
        """ return the len of myself """
        # Delegates to len(self); __len__ is defined elsewhere in this class.
        return len(self)
+
    @cache_readonly
    def itemsize(self):
        """ return the size of a single category """
        # Item size of the categories Index, not of the integer codes.
        return self.categories.itemsize
+
+ def tolist(self):
+ """
+ Return a list of the values.
+
+ These are each a scalar type, which is a Python scalar
+ (for str, int, float) or a pandas scalar
+ (for Timestamp/Timedelta/Interval/Period)
+ """
+ if is_datetimelike(self.categories):
+ return [_maybe_box_datetimelike(x) for x in self]
+ return np.array(self).tolist()
+
    @property
    def base(self):
        """ compat, we are always our own object """
        # numpy-compat attribute: no underlying base buffer is exposed.
        return None
+
    @classmethod
    def _from_inferred_categories(cls, inferred_categories, inferred_codes,
                                  dtype):
        """Construct a Categorical from inferred values

        For inferred categories (`dtype` is None) the categories are sorted.
        For explicit `dtype`, the `inferred_categories` are cast to the
        appropriate type.

        Parameters
        ----------

        inferred_categories : Index
        inferred_codes : Index
        dtype : CategoricalDtype or 'category'

        Returns
        -------
        Categorical
        """
        from pandas import Index, to_numeric, to_datetime, to_timedelta

        cats = Index(inferred_categories)

        known_categories = (isinstance(dtype, CategoricalDtype) and
                            dtype.categories is not None)

        if known_categories:
            # Convert to a specialized type with `dtype` if specified
            if dtype.categories.is_numeric():
                cats = to_numeric(inferred_categories, errors='coerce')
            elif is_datetime64_dtype(dtype.categories):
                cats = to_datetime(inferred_categories, errors='coerce')
            elif is_timedelta64_dtype(dtype.categories):
                cats = to_timedelta(inferred_categories, errors='coerce')

        if known_categories:
            # recode from observation order to dtype.categories order
            categories = dtype.categories
            codes = _recode_for_categories(inferred_codes, cats, categories)
        elif not cats.is_monotonic_increasing:
            # sort categories and recode for unknown categories
            unsorted = cats.copy()
            categories = cats.sort_values()
            codes = _recode_for_categories(inferred_codes, unsorted,
                                           categories)
            dtype = CategoricalDtype(categories, ordered=False)
        else:
            # already sorted: keep the inferred codes as-is
            dtype = CategoricalDtype(cats, ordered=False)
            codes = inferred_codes

        return cls(codes, dtype=dtype, fastpath=True)
+
+ @classmethod
+ def from_codes(cls, codes, categories, ordered=False):
+ """
+ Make a Categorical type from codes and categories arrays.
+
+ This constructor is useful if you already have codes and categories and
+ so do not need the (computation intensive) factorization step, which is
+ usually done on the constructor.
+
+ If your data does not follow this convention, please use the normal
+ constructor.
+
+ Parameters
+ ----------
+ codes : array-like, integers
+ An integer array, where each integer points to a category in
+ categories or -1 for NaN
+ categories : index-like
+ The categories for the categorical. Items need to be unique.
+ ordered : boolean, (default False)
+ Whether or not this categorical is treated as a ordered
+ categorical. If not given, the resulting categorical will be
+ unordered.
+ """
+ try:
+ codes = np.asarray(codes, np.int64)
+ except (ValueError, TypeError):
+ raise ValueError(
+ "codes need to be convertible to an arrays of integers")
+
+ categories = CategoricalDtype._validate_categories(categories)
+
+ if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
+ raise ValueError("codes need to be between -1 and "
+ "len(categories)-1")
+
+ return cls(codes, categories=categories, ordered=ordered,
+ fastpath=True)
+
+ _codes = None
+
+ def _get_codes(self):
+ """ Get the codes.
+
+ Returns
+ -------
+ codes : integer array view
+ A non writable view of the `codes` array.
+ """
+ v = self._codes.view()
+ v.flags.writeable = False
+ return v
+
    def _set_codes(self, codes):
        """
        Not settable by the user directly
        """
        # Codes can only change through category-mutating APIs
        # (set_categories etc.), never by direct assignment.
        raise ValueError("cannot set Categorical codes directly")
+
+ codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
+
    def _set_categories(self, categories, fastpath=False):
        """ Sets new categories inplace

        Parameters
        ----------
        fastpath : boolean (default: False)
            Don't perform validation of the categories for uniqueness or nulls

        Examples
        --------
        >>> c = Categorical(['a', 'b'])
        >>> c
        [a, b]
        Categories (2, object): [a, b]

        >>> c._set_categories(pd.Index(['a', 'c']))
        >>> c
        [a, c]
        Categories (2, object): [a, c]
        """

        if fastpath:
            new_dtype = CategoricalDtype._from_fastpath(categories,
                                                        self.ordered)
        else:
            new_dtype = CategoricalDtype(categories, ordered=self.ordered)
        # The codes are untouched, so the number of categories must not
        # change (this check is skipped on the fastpath).
        if (not fastpath and self.dtype.categories is not None and
                len(new_dtype.categories) != len(self.dtype.categories)):
            raise ValueError("new categories need to have the same number of "
                             "items than the old categories!")

        self._dtype = new_dtype
+
    def _codes_for_groupby(self, sort):
        """
        If sort=False, return a copy of self, coded with categories as
        returned by .unique(), followed by any categories not appearing in
        the data. If sort=True, return self.

        This method is needed solely to ensure the categorical index of the
        GroupBy result has categories in the order of appearance in the data
        (GH-8868).

        Parameters
        ----------
        sort : boolean
            The value of the sort parameter groupby was called with.

        Returns
        -------
        Categorical
            If sort=False, the new categories are set to the order of
            appearance in codes (unless ordered=True, in which case the
            original order is preserved), followed by any unrepresented
            categories in the original order.
        """

        # Already sorted according to self.categories; all is fine
        if sort:
            return self

        # sort=False should order groups in as-encountered order (GH-8868)
        cat = self.unique()

        # But for groupby to work, all categories should be present,
        # including those missing from the data (GH-13179), which .unique()
        # above dropped
        cat.add_categories(
            self.categories[~self.categories.isin(cat.categories)],
            inplace=True)

        # Re-express self's codes relative to the as-encountered ordering.
        return self.reorder_categories(cat.categories)
+
    def _set_dtype(self, dtype):
        """Internal method for directly updating the CategoricalDtype

        Parameters
        ----------
        dtype : CategoricalDtype

        Notes
        -----
        We don't do any validation here. It's assumed that the dtype is
        a (valid) instance of `CategoricalDtype`.
        """
        # Recode so the same values are expressed relative to the new
        # categories (values absent from the new categories become -1/NaN).
        codes = _recode_for_categories(self.codes, self.categories,
                                       dtype.categories)
        return type(self)(codes, dtype=dtype, fastpath=True)
+
+ def set_ordered(self, value, inplace=False):
+ """
+ Sets the ordered attribute to the boolean value
+
+ Parameters
+ ----------
+ value : boolean to set whether this categorical is ordered (True) or
+ not (False)
+ inplace : boolean (default: False)
+ Whether or not to set the ordered attribute inplace or return a copy
+ of this categorical with ordered set to the value
+ """
+ inplace = validate_bool_kwarg(inplace, 'inplace')
+ new_dtype = CategoricalDtype(self.categories, ordered=value)
+ cat = self if inplace else self.copy()
+ cat._dtype = new_dtype
+ if not inplace:
+ return cat
+
+ def as_ordered(self, inplace=False):
+ """
+ Sets the Categorical to be ordered
+
+ Parameters
+ ----------
+ inplace : boolean (default: False)
+ Whether or not to set the ordered attribute inplace or return a copy
+ of this categorical with ordered set to True
+ """
+ inplace = validate_bool_kwarg(inplace, 'inplace')
+ return self.set_ordered(True, inplace=inplace)
+
+ def as_unordered(self, inplace=False):
+ """
+ Sets the Categorical to be unordered
+
+ Parameters
+ ----------
+ inplace : boolean (default: False)
+ Whether or not to set the ordered attribute inplace or return a copy
+ of this categorical with ordered set to False
+ """
+ inplace = validate_bool_kwarg(inplace, 'inplace')
+ return self.set_ordered(False, inplace=inplace)
+
+ def set_categories(self, new_categories, ordered=None, rename=False,
+ inplace=False):
+ """ Sets the categories to the specified new_categories.
+
+ `new_categories` can include new categories (which will result in
+ unused categories) or remove old categories (which results in values
+ set to NaN). If `rename==True`, the categories will simple be renamed
+ (less or more items than in old categories will result in values set to
+ NaN or in unused categories respectively).
+
+ This method can be used to perform more than one action of adding,
+ removing, and reordering simultaneously and is therefore faster than
+ performing the individual steps via the more specialised methods.
+
+ On the other hand this methods does not do checks (e.g., whether the
+ old categories are included in the new categories on a reorder), which
+ can result in surprising changes, for example when using special string
+ dtypes on python3, which does not considers a S1 string equal to a
+ single char python string.
+
+ Raises
+ ------
+ ValueError
+ If new_categories does not validate as categories
+
+ Parameters
+ ----------
+ new_categories : Index-like
+ The categories in new order.
+ ordered : boolean, (default: False)
+ Whether or not the categorical is treated as a ordered categorical.
+ If not given, do not change the ordered information.
+ rename : boolean (default: False)
+ Whether or not the new_categories should be considered as a rename
+ of the old categories or as reordered categories.
+ inplace : boolean (default: False)
+ Whether or not to reorder the categories inplace or return a copy of
+ this categorical with reordered categories.
+
+ Returns
+ -------
+ cat : Categorical with reordered categories or None if inplace.
+
+ See also
+ --------
+ rename_categories
+ reorder_categories
+ add_categories
+ remove_categories
+ remove_unused_categories
+ """
+ inplace = validate_bool_kwarg(inplace, 'inplace')
+ if ordered is None:
+ ordered = self.dtype.ordered
+ new_dtype = CategoricalDtype(new_categories, ordered=ordered)
+
+ cat = self if inplace else self.copy()
+ if rename:
+ if (cat.dtype.categories is not None and
+ len(new_dtype.categories) < len(cat.dtype.categories)):
+ # remove all _codes which are larger and set to -1/NaN
+ self._codes[self._codes >= len(new_dtype.categories)] = -1
+ else:
+ codes = _recode_for_categories(self.codes, self.categories,
+ new_dtype.categories)
+ cat._codes = codes
+ cat._dtype = new_dtype
+
+ if not inplace:
+ return cat
+
    def rename_categories(self, new_categories, inplace=False):
        """ Renames categories.

        Raises
        ------
        ValueError
            If new categories are list-like and do not have the same number of
            items than the current categories or do not validate as categories

        Parameters
        ----------
        new_categories : list-like, dict-like or callable

           * list-like: all items must be unique and the number of items in
             the new categories must match the existing number of categories.

           * dict-like: specifies a mapping from
             old categories to new. Categories not contained in the mapping
             are passed through and extra categories in the mapping are
             ignored.

             .. versionadded:: 0.21.0

           * callable : a callable that is called on all items in the old
             categories and whose return values comprise the new categories.

             .. versionadded:: 0.23.0

           .. warning::

              Currently, Series are considered list like. In a future version
              of pandas they'll be considered dict-like.

        inplace : boolean (default: False)
           Whether or not to rename the categories inplace or return a copy of
           this categorical with renamed categories.

        Returns
        -------
        cat : Categorical or None
           With ``inplace=False``, the new categorical is returned.
           With ``inplace=True``, there is no return value.

        See also
        --------
        reorder_categories
        add_categories
        remove_categories
        remove_unused_categories
        set_categories

        Examples
        --------
        >>> c = Categorical(['a', 'a', 'b'])
        >>> c.rename_categories([0, 1])
        [0, 0, 1]
        Categories (2, int64): [0, 1]

        For dict-like ``new_categories``, extra keys are ignored and
        categories not in the dictionary are passed through

        >>> c.rename_categories({'a': 'A', 'c': 'C'})
        [A, A, b]
        Categories (2, object): [A, b]

        You may also provide a callable to create the new categories

        >>> c.rename_categories(lambda x: x.upper())
        [A, A, B]
        Categories (2, object): [A, B]
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        cat = self if inplace else self.copy()

        # Series currently behaves list-like here; warn about the planned
        # switch to dict-like semantics before converting.
        if isinstance(new_categories, ABCSeries):
            msg = ("Treating Series 'new_categories' as a list-like and using "
                   "the values. In a future version, 'rename_categories' will "
                   "treat Series like a dictionary.\n"
                   "For dict-like, use 'new_categories.to_dict()'\n"
                   "For list-like, use 'new_categories.values'.")
            warn(msg, FutureWarning, stacklevel=2)
            new_categories = list(new_categories)

        # Dispatch on the kind of mapper: dict-like, callable, or list-like.
        # Assigning to ``cat.categories`` performs validation and the length
        # check (see the ``categories`` setter).
        if is_dict_like(new_categories):
            cat.categories = [new_categories.get(item, item)
                              for item in cat.categories]
        elif callable(new_categories):
            cat.categories = [new_categories(item) for item in cat.categories]
        else:
            cat.categories = new_categories
        if not inplace:
            return cat
+
+ def reorder_categories(self, new_categories, ordered=None, inplace=False):
+ """ Reorders categories as specified in new_categories.
+
+ `new_categories` need to include all old categories and no new category
+ items.
+
+ Raises
+ ------
+ ValueError
+ If the new categories do not contain all old category items or any
+ new ones
+
+ Parameters
+ ----------
+ new_categories : Index-like
+ The categories in new order.
+ ordered : boolean, optional
+ Whether or not the categorical is treated as a ordered categorical.
+ If not given, do not change the ordered information.
+ inplace : boolean (default: False)
+ Whether or not to reorder the categories inplace or return a copy of
+ this categorical with reordered categories.
+
+ Returns
+ -------
+ cat : Categorical with reordered categories or None if inplace.
+
+ See also
+ --------
+ rename_categories
+ add_categories
+ remove_categories
+ remove_unused_categories
+ set_categories
+ """
+ inplace = validate_bool_kwarg(inplace, 'inplace')
+ if set(self.dtype.categories) != set(new_categories):
+ raise ValueError("items in new_categories are not the same as in "
+ "old categories")
+ return self.set_categories(new_categories, ordered=ordered,
+ inplace=inplace)
+
    def add_categories(self, new_categories, inplace=False):
        """ Add new categories.

        `new_categories` will be included at the last/highest place in the
        categories and will be unused directly after this call.

        Raises
        ------
        ValueError
            If the new categories include old categories or do not validate as
            categories

        Parameters
        ----------
        new_categories : category or list-like of category
           The new categories to be included.
        inplace : boolean (default: False)
           Whether or not to add the categories inplace or return a copy of
           this categorical with added categories.

        Returns
        -------
        cat : Categorical with new categories added or None if inplace.

        See also
        --------
        rename_categories
        reorder_categories
        remove_categories
        remove_unused_categories
        set_categories
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        if not is_list_like(new_categories):
            new_categories = [new_categories]
        already_included = set(new_categories) & set(self.dtype.categories)
        if len(already_included) != 0:
            msg = ("new categories must not include old categories: "
                   "{already_included!s}")
            raise ValueError(msg.format(already_included=already_included))
        # Appending keeps existing codes valid; new categories start unused.
        new_categories = list(self.dtype.categories) + list(new_categories)
        new_dtype = CategoricalDtype(new_categories, self.ordered)

        cat = self if inplace else self.copy()
        cat._dtype = new_dtype
        # Re-coerce the codes' integer dtype for the enlarged category set.
        cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
        if not inplace:
            return cat
+
    def remove_categories(self, removals, inplace=False):
        """ Removes the specified categories.

        `removals` must be included in the old categories. Values which were in
        the removed categories will be set to NaN

        Raises
        ------
        ValueError
            If the removals are not contained in the categories

        Parameters
        ----------
        removals : category or list of categories
           The categories which should be removed.
        inplace : boolean (default: False)
           Whether or not to remove the categories inplace or return a copy of
           this categorical with removed categories.

        Returns
        -------
        cat : Categorical with removed categories or None if inplace.

        See also
        --------
        rename_categories
        reorder_categories
        add_categories
        remove_unused_categories
        set_categories
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        if not is_list_like(removals):
            removals = [removals]

        removal_set = set(list(removals))
        not_included = removal_set - set(self.dtype.categories)
        new_categories = [c for c in self.dtype.categories
                          if c not in removal_set]

        # GH 10156: NaN in ``removals`` is handled specially, since NaN can
        # never equal itself in the set arithmetic above.
        if any(isna(removals)):
            not_included = [x for x in not_included if notna(x)]
            new_categories = [x for x in new_categories if notna(x)]

        if len(not_included) != 0:
            msg = "removals must all be in old categories: {not_included!s}"
            raise ValueError(msg.format(not_included=not_included))

        # Delegate to set_categories, which recodes removed values to NaN.
        return self.set_categories(new_categories, ordered=self.ordered,
                                   rename=False, inplace=inplace)
+
    def remove_unused_categories(self, inplace=False):
        """ Removes categories which are not used.

        Parameters
        ----------
        inplace : boolean (default: False)
           Whether or not to drop unused categories inplace or return a copy of
           this categorical with unused categories dropped.

        Returns
        -------
        cat : Categorical with unused categories dropped or None if inplace.

        See also
        --------
        rename_categories
        reorder_categories
        add_categories
        remove_categories
        set_categories
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        cat = self if inplace else self.copy()
        # ``idx`` holds the distinct codes in use; ``inv`` maps every old
        # code to its position in ``idx`` (i.e. the new compacted code).
        idx, inv = np.unique(cat._codes, return_inverse=True)

        if idx.size != 0 and idx[0] == -1:  # na sentinel
            idx, inv = idx[1:], inv - 1

        new_categories = cat.dtype.categories.take(idx)
        new_dtype = CategoricalDtype._from_fastpath(new_categories,
                                                    ordered=self.ordered)
        cat._dtype = new_dtype
        cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)

        if not inplace:
            return cat
+
    def map(self, mapper):
        """Apply mapper function to its categories (not codes).

        Parameters
        ----------
        mapper : callable
            Function to be applied. When all categories are mapped
            to different categories, the result will be Categorical which has
            the same order property as the original. Otherwise, the result will
            be np.ndarray.

        Returns
        -------
        applied : Categorical or Index.

        """
        new_categories = self.categories.map(mapper)
        try:
            return self.from_codes(self._codes.copy(),
                                   categories=new_categories,
                                   ordered=self.ordered)
        except ValueError:
            # Mapped categories do not form a valid category set (e.g. not
            # unique): fall back to a plain array of mapped values.
            return np.take(new_categories, self._codes)
+
    # Rich comparisons are generated by the module-level _cat_compare_op
    # factory (defined elsewhere in this file), which enforces the
    # ordered/unordered comparison rules.
    __eq__ = _cat_compare_op('__eq__')
    __ne__ = _cat_compare_op('__ne__')
    __lt__ = _cat_compare_op('__lt__')
    __gt__ = _cat_compare_op('__gt__')
    __le__ = _cat_compare_op('__le__')
    __ge__ = _cat_compare_op('__ge__')
+
+ # for Series/ndarray like compat
+ @property
+ def shape(self):
+ """ Shape of the Categorical.
+
+ For internal compatibility with numpy arrays.
+
+ Returns
+ -------
+ shape : tuple
+ """
+
+ return tuple([len(self._codes)])
+
    def shift(self, periods):
        """
        Shift Categorical by desired number of periods.

        Parameters
        ----------
        periods : int
            Number of periods to move, can be positive or negative

        Returns
        -------
        shifted : Categorical
        """
        # since categoricals always have ndim == 1, an axis parameter
        # doesn't make any sense here.
        codes = self.codes
        if codes.ndim > 1:
            raise NotImplementedError("Categorical with ndim > 1.")
        if np.prod(codes.shape) and (periods != 0):
            # np.roll returns a fresh writable array, so assigning -1 below
            # does not touch the (read-only) original codes view.
            codes = np.roll(codes, _ensure_platform_int(periods), axis=0)
            if periods > 0:
                # The leading slots wrapped around from the end: blank them.
                codes[:periods] = -1
            else:
                codes[periods:] = -1

        return self.from_codes(codes, categories=self.categories,
                               ordered=self.ordered)
+
    def __array__(self, dtype=None):
        """
        The numpy array interface.

        Returns
        -------
        values : numpy array
            A numpy array of either the specified dtype or,
            if dtype==None (default), the same dtype as
            categorical.categories.dtype
        """
        # Materialize by indexing the categories with the codes.
        # NOTE(review): -1 codes presumably become NaN through take_1d's
        # fill handling -- confirm against pandas.core.algorithms.take_1d.
        ret = take_1d(self.categories.values, self._codes)
        if dtype and not is_dtype_equal(dtype, self.categories.dtype):
            return np.asarray(ret, dtype)
        return ret
+
    def __setstate__(self, state):
        """Necessary for making this object picklable"""
        if not isinstance(state, dict):
            raise Exception('invalid pickle state')

        # Provide compatibility with pre-0.15.0 Categoricals.
        if '_categories' not in state and '_levels' in state:
            state['_categories'] = self.dtype._validate_categories(state.pop(
                '_levels'))
        if '_codes' not in state and 'labels' in state:
            state['_codes'] = coerce_indexer_dtype(
                state.pop('labels'), state['_categories'])

        # 0.16.0 ordered change
        if '_ordered' not in state:

            # >=15.0 < 0.16.0
            if 'ordered' in state:
                state['_ordered'] = state.pop('ordered')
            else:
                state['_ordered'] = False

        # 0.21.0 CategoricalDtype change
        if '_dtype' not in state:
            state['_dtype'] = CategoricalDtype(state['_categories'],
                                               state['_ordered'])

        # Restore every (possibly migrated) attribute onto the instance.
        for k, v in compat.iteritems(state):
            setattr(self, k, v)
+
    @property
    def T(self):
        # numpy-compat: a 1-d structure is its own transpose.
        return self
+
    @property
    def nbytes(self):
        # Shallow byte count: codes array plus the categories' values array.
        return self._codes.nbytes + self.dtype.categories.values.nbytes
+
    def memory_usage(self, deep=False):
        """
        Memory usage of my values

        Parameters
        ----------
        deep : bool
            Introspect the data deeply, interrogate
            `object` dtypes for system-level memory consumption

        Returns
        -------
        bytes used

        Notes
        -----
        Memory usage does not include memory consumed by elements that
        are not components of the array if deep=False

        See Also
        --------
        numpy.ndarray.nbytes
        """
        # ``deep`` only affects the categories Index; the codes are a plain
        # integer ndarray whose nbytes is already exact.
        return self._codes.nbytes + self.dtype.categories.memory_usage(
            deep=deep)
+
    @Substitution(klass='Categorical')
    @Appender(_shared_docs['searchsorted'])
    @deprecate_kwarg(old_arg_name='v', new_arg_name='value')
    def searchsorted(self, value, side='left', sorter=None):
        # Searching only makes sense with an ordered categorical.
        if not self.ordered:
            raise ValueError("Categorical not ordered\nyou can use "
                             ".as_ordered() to change the Categorical to an "
                             "ordered one")

        from pandas.core.series import Series

        # Translate the search values into codes; every value must map to an
        # existing category (code -1 means "not found").
        values_as_codes = _get_codes_for_values(Series(value).values,
                                                self.categories)

        if -1 in values_as_codes:
            raise ValueError("Value(s) to be inserted must be in categories.")

        return self.codes.searchsorted(values_as_codes, side=side,
                                       sorter=sorter)
+
    def isna(self):
        """
        Detect missing values

        Both missing values (-1 in .codes) and NA as a category are detected.

        Returns
        -------
        a boolean array of whether my values are null

        See also
        --------
        isna : top-level isna
        isnull : alias of isna
        Categorical.notna : boolean inverse of Categorical.isna

        """

        ret = self._codes == -1

        # String/object and float categories can hold np.nan
        if self.categories.dtype.kind in ['S', 'O', 'f']:
            if np.nan in self.categories:
                nan_pos = np.where(isna(self.categories))[0]
                # we only have one NA in categories
                ret = np.logical_or(ret, self._codes == nan_pos)
        return ret
    # Backwards-compatible alias.
    isnull = isna
+
    def notna(self):
        """
        Inverse of isna

        Both missing values (-1 in .codes) and NA as a category are detected as
        null.

        Returns
        -------
        a boolean array of whether my values are not null

        See also
        --------
        notna : top-level notna
        notnull : alias of notna
        Categorical.isna : boolean inverse of Categorical.notna

        """
        return ~self.isna()
    # Backwards-compatible alias.
    notnull = notna
+
    def put(self, *args, **kwargs):
        """
        Replace specific elements in the Categorical with given values.
        """
        # numpy-compat stub: always raises.
        raise NotImplementedError(("'put' is not yet implemented "
                                   "for Categorical"))
+
+ def dropna(self):
+ """
+ Return the Categorical without null values.
+
+ Both missing values (-1 in .codes) and NA as a category are detected.
+ NA is removed from the categories if present.
+
+ Returns
+ -------
+ valid : Categorical
+ """
+ result = self[self.notna()]
+ if isna(result.categories).any():
+ result = result.remove_categories([np.nan])
+ return result
+
    def value_counts(self, dropna=True):
        """
        Returns a Series containing counts of each category.

        Every category will have an entry, even those with a count of 0.

        Parameters
        ----------
        dropna : boolean, default True
            Don't include counts of NaN, even if NaN is a category.

        Returns
        -------
        counts : Series

        See Also
        --------
        Series.value_counts

        """
        from numpy import bincount
        from pandas import isna, Series, CategoricalIndex

        # If NaN is itself a category and should be dropped, remove it so
        # its occurrences become -1 codes (excluded below).
        obj = (self.remove_categories([np.nan]) if dropna and
               isna(self.categories).any() else self)
        code, cat = obj._codes, obj.categories
        # ``mask`` marks non-missing codes; ``clean`` is True when there are
        # no missing values at all.
        ncat, mask = len(cat), 0 <= code
        ix, clean = np.arange(ncat), mask.all()

        if dropna or clean:
            obs = code if clean else code[mask]
            count = bincount(obs, minlength=ncat or None)
        else:
            # Count NaNs too: fold the -1 codes into an extra bin at ``ncat``.
            count = bincount(np.where(mask, code, ncat))
            ix = np.append(ix, -1)

        # Wrap the category positions as a Categorical for the result index.
        ix = self._constructor(ix, dtype=self.dtype,
                               fastpath=True)

        return Series(count, index=CategoricalIndex(ix), dtype='int64')
+
    def get_values(self):
        """ Return the values.

        For internal compatibility with pandas formatting.

        Returns
        -------
        values : numpy array
            A numpy array of the same dtype as categorical.categories.dtype or
            Index if datetime / periods
        """
        # if we are a datetime and period index, return Index to keep metadata
        if is_datetimelike(self.categories):
            return self.categories.take(self._codes, fill_value=np.nan)
        # Otherwise fall back to the ndarray produced by __array__.
        return np.array(self)
+
+ def check_for_ordered(self, op):
+ """ assert that we are ordered """
+ if not self.ordered:
+ raise TypeError("Categorical is not ordered for operation {op}\n"
+ "you can use .as_ordered() to change the "
+ "Categorical to an ordered one\n".format(op=op))
+
+ def argsort(self, ascending=True, kind='quicksort', *args, **kwargs):
+ """
+ Returns the indices that would sort the Categorical instance if
+ 'sort_values' was called. This function is implemented to provide
+ compatibility with numpy ndarray objects.
+
+ While an ordering is applied to the category values, arg-sorting
+ in this context refers more to organizing and grouping together
+ based on matching category values. Thus, this function can be
+ called on an unordered Categorical instance unlike the functions
+ 'Categorical.min' and 'Categorical.max'.
+
+ Returns
+ -------
+ argsorted : numpy array
+
+ See also
+ --------
+ numpy.ndarray.argsort
+ """
+ ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
+ result = np.argsort(self._codes.copy(), kind=kind, **kwargs)
+ if not ascending:
+ result = result[::-1]
+ return result
+
+ def sort_values(self, inplace=False, ascending=True, na_position='last'):
+ """ Sorts the Categorical by category value returning a new
+ Categorical by default.
+
+ While an ordering is applied to the category values, sorting in this
+ context refers more to organizing and grouping together based on
+ matching category values. Thus, this function can be called on an
+ unordered Categorical instance unlike the functions 'Categorical.min'
+ and 'Categorical.max'.
+
+ Parameters
+ ----------
+ inplace : boolean, default False
+ Do operation in place.
+ ascending : boolean, default True
+ Order ascending. Passing False orders descending. The
+ ordering parameter provides the method by which the
+ category values are organized.
+ na_position : {'first', 'last'} (optional, default='last')
+ 'first' puts NaNs at the beginning
+ 'last' puts NaNs at the end
+
+ Returns
+ -------
+ y : Categorical or None
+
+ See Also
+ --------
+ Categorical.sort
+ Series.sort_values
+
+ Examples
+ --------
+ >>> c = pd.Categorical([1, 2, 2, 1, 5])
+ >>> c
+ [1, 2, 2, 1, 5]
+ Categories (3, int64): [1, 2, 5]
+ >>> c.sort_values()
+ [1, 1, 2, 2, 5]
+ Categories (3, int64): [1, 2, 5]
+ >>> c.sort_values(ascending=False)
+ [5, 2, 2, 1, 1]
+ Categories (3, int64): [1, 2, 5]
+
+ Inplace sorting can be done as well:
+
+ >>> c.sort_values(inplace=True)
+ >>> c
+ [1, 1, 2, 2, 5]
+ Categories (3, int64): [1, 2, 5]
+ >>>
+ >>> c = pd.Categorical([1, 2, 2, 1, 5])
+
+ 'sort_values' behaviour with NaNs. Note that 'na_position'
+ is independent of the 'ascending' parameter:
+
+ >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
+ >>> c
+ [NaN, 2.0, 2.0, NaN, 5.0]
+ Categories (2, int64): [2, 5]
+ >>> c.sort_values()
+ [2.0, 2.0, 5.0, NaN, NaN]
+ Categories (2, int64): [2, 5]
+ >>> c.sort_values(ascending=False)
+ [5.0, 2.0, 2.0, NaN, NaN]
+ Categories (2, int64): [2, 5]
+ >>> c.sort_values(na_position='first')
+ [NaN, NaN, 2.0, 2.0, 5.0]
+ Categories (2, int64): [2, 5]
+ >>> c.sort_values(ascending=False, na_position='first')
+ [NaN, NaN, 5.0, 2.0, 2.0]
+ Categories (2, int64): [2, 5]
+ """
+ inplace = validate_bool_kwarg(inplace, 'inplace')
+ if na_position not in ['last', 'first']:
+ msg = 'invalid na_position: {na_position!r}'
+ raise ValueError(msg.format(na_position=na_position))
+
+ codes = np.sort(self._codes)
+ if not ascending:
+ codes = codes[::-1]
+
+ # NaN handling
+ na_mask = (codes == -1)
+ if na_mask.any():
+ n_nans = len(codes[na_mask])
+ if na_position == "first":
+ # in this case sort to the front
+ new_codes = codes.copy()
+ new_codes[0:n_nans] = -1
+ new_codes[n_nans:] = codes[~na_mask]
+ codes = new_codes
+ elif na_position == "last":
+ # ... and to the end
+ new_codes = codes.copy()
+ pos = len(codes) - n_nans
+ new_codes[0:pos] = codes[~na_mask]
+ new_codes[pos:] = -1
+ codes = new_codes
+ if inplace:
+ self._codes = codes
+ return
+ else:
+ return self._constructor(values=codes, categories=self.categories,
+ ordered=self.ordered, fastpath=True)
+
+ def _values_for_rank(self):
+ """
+ For correctly ranking ordered categorical data. See GH#15420
+
+ Ordered categorical data should be ranked on the basis of
+ codes with -1 translated to NaN.
+
+ Returns
+ -------
+ numpy array
+
+ """
+ from pandas import Series
+ if self.ordered:
+ values = self.codes
+ mask = values == -1
+ if mask.any():
+ values = values.astype('float64')
+ values[mask] = np.nan
+ elif self.categories.is_numeric():
+ values = np.array(self)
+ else:
+ # reorder the categories (so rank can use the float codes)
+ # instead of passing an object array to rank
+ values = np.array(
+ self.rename_categories(Series(self.categories).rank().values)
+ )
+ return values
+
+ def ravel(self, order='C'):
+ """ Return a flattened (numpy) array.
+
+ For internal compatibility with numpy arrays.
+
+ Returns
+ -------
+ raveled : numpy array
+ """
+ return np.array(self)
+
+ def view(self):
+ """Return a view of myself.
+
+ For internal compatibility with numpy arrays.
+
+ Returns
+ -------
+ view : Categorical
+ Returns `self`!
+ """
+ return self
+
+ def to_dense(self):
+ """Return my 'dense' representation
+
+ For internal compatibility with numpy arrays.
+
+ Returns
+ -------
+ dense : array
+ """
+ return np.asarray(self)
+
+ @deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
+ def fillna(self, value=None, method=None, limit=None):
+ """ Fill NA/NaN values using the specified method.
+
+ Parameters
+ ----------
+ method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
+ Method to use for filling holes in reindexed Series
+ pad / ffill: propagate last valid observation forward to next valid
+ backfill / bfill: use NEXT valid observation to fill gap
+ value : scalar, dict, Series
+ If a scalar value is passed it is used to fill all missing values.
+ Alternatively, a Series or dict can be used to fill in different
+ values for each index. The value should not be a list. The
+ value(s) passed should either be in the categories or should be
+ NaN.
+ limit : int, default None
+ (Not implemented yet for Categorical!)
+ If method is specified, this is the maximum number of consecutive
+ NaN values to forward/backward fill. In other words, if there is
+ a gap with more than this number of consecutive NaNs, it will only
+ be partially filled. If method is not specified, this is the
+ maximum number of entries along the entire axis where NaNs will be
+ filled.
+
+ Returns
+ -------
+ filled : Categorical with NA/NaN filled
+ """
+
+ if value is None:
+ value = np.nan
+ if limit is not None:
+ raise NotImplementedError("specifying a limit for fillna has not "
+ "been implemented yet")
+
+ values = self._codes
+
+ # Make sure that we also get NA in categories
+ if self.categories.dtype.kind in ['S', 'O', 'f']:
+ if np.nan in self.categories:
+ values = values.copy()
+ nan_pos = np.where(isna(self.categories))[0]
+ # we only have one NA in categories
+ values[values == nan_pos] = -1
+
+ # pad / bfill
+ if method is not None:
+
+ values = self.to_dense().reshape(-1, len(self))
+ values = interpolate_2d(values, method, 0, None,
+ value).astype(self.categories.dtype)[0]
+ values = _get_codes_for_values(values, self.categories)
+
+ else:
+
+ # If value is a dict or a Series (a dict value has already
+ # been converted to a Series)
+ if isinstance(value, ABCSeries):
+ if not value[~value.isin(self.categories)].isna().all():
+ raise ValueError("fill value must be in categories")
+
+ values_codes = _get_codes_for_values(value, self.categories)
+ indexer = np.where(values_codes != -1)
+ values[indexer] = values_codes[values_codes != -1]
+
+ # If value is not a dict or Series it should be a scalar
+ elif is_scalar(value):
+ if not isna(value) and value not in self.categories:
+ raise ValueError("fill value must be in categories")
+
+ mask = values == -1
+ if mask.any():
+ values = values.copy()
+ if isna(value):
+ values[mask] = -1
+ else:
+ values[mask] = self.categories.get_loc(value)
+
+ else:
+ raise TypeError('"value" parameter must be a scalar, dict '
+ 'or Series, but you passed a '
+ '"{0}"'.format(type(value).__name__))
+
+ return self._constructor(values, categories=self.categories,
+ ordered=self.ordered, fastpath=True)
+
+ def take_nd(self, indexer, allow_fill=True, fill_value=None):
+ """ Take the codes by the indexer, fill with the fill_value.
+
+ For internal compatibility with numpy arrays.
+ """
+
+ # filling must always be None/nan here
+ # but is passed thru internally
+ assert isna(fill_value)
+
+ codes = take_1d(self._codes, indexer, allow_fill=True, fill_value=-1)
+ result = self._constructor(codes, categories=self.categories,
+ ordered=self.ordered, fastpath=True)
+ return result
+
+ take = take_nd
+
+ def _slice(self, slicer):
+ """ Return a slice of myself.
+
+ For internal compatibility with numpy arrays.
+ """
+
+ # only allow 1 dimensional slicing, but can
+        # in a 2-d case be passed (slice(None),....)
+ if isinstance(slicer, tuple) and len(slicer) == 2:
+ if not is_null_slice(slicer[0]):
+ raise AssertionError("invalid slicing for a 1-ndim "
+ "categorical")
+ slicer = slicer[1]
+
+ _codes = self._codes[slicer]
+ return self._constructor(values=_codes, categories=self.categories,
+ ordered=self.ordered, fastpath=True)
+
+ def __len__(self):
+ """The length of this Categorical."""
+ return len(self._codes)
+
+ def __iter__(self):
+ """Returns an Iterator over the values of this Categorical."""
+ return iter(self.get_values())
+
+ def _tidy_repr(self, max_vals=10, footer=True):
+ """ a short repr displaying only max_vals and an optional (but default
+ footer)
+ """
+ num = max_vals // 2
+ head = self[:num]._get_repr(length=False, footer=False)
+ tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
+
+ result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
+ if footer:
+ result = u('{result}\n{footer}').format(result=result,
+ footer=self._repr_footer())
+
+ return compat.text_type(result)
+
+ def _repr_categories(self):
+ """ return the base repr for the categories """
+ max_categories = (10 if get_option("display.max_categories") == 0 else
+ get_option("display.max_categories"))
+ from pandas.io.formats import format as fmt
+ if len(self.categories) > max_categories:
+ num = max_categories // 2
+ head = fmt.format_array(self.categories[:num], None)
+ tail = fmt.format_array(self.categories[-num:], None)
+ category_strs = head + ["..."] + tail
+ else:
+ category_strs = fmt.format_array(self.categories, None)
+
+ # Strip all leading spaces, which format_array adds for columns...
+ category_strs = [x.strip() for x in category_strs]
+ return category_strs
+
+ def _repr_categories_info(self):
+ """ Returns a string representation of the footer."""
+
+ category_strs = self._repr_categories()
+ dtype = getattr(self.categories, 'dtype_str',
+ str(self.categories.dtype))
+
+ levheader = "Categories ({length}, {dtype}): ".format(
+ length=len(self.categories), dtype=dtype)
+ width, height = get_terminal_size()
+ max_width = get_option("display.width") or width
+ if com.in_ipython_frontend():
+ # 0 = no breaks
+ max_width = 0
+ levstring = ""
+ start = True
+ cur_col_len = len(levheader) # header
+ sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
+ linesep = sep.rstrip() + "\n" # remove whitespace
+ for val in category_strs:
+ if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
+ levstring += linesep + (" " * (len(levheader) + 1))
+ cur_col_len = len(levheader) + 1 # header + a whitespace
+ elif not start:
+ levstring += sep
+ cur_col_len += len(val)
+ levstring += val
+ start = False
+        # replace " < ... < " with " ... " to save space
+ return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
+
+ def _repr_footer(self):
+
+ return u('Length: {length}\n{info}').format(
+ length=len(self), info=self._repr_categories_info())
+
+ def _get_repr(self, length=True, na_rep='NaN', footer=True):
+ from pandas.io.formats import format as fmt
+ formatter = fmt.CategoricalFormatter(self, length=length,
+ na_rep=na_rep, footer=footer)
+ result = formatter.to_string()
+ return compat.text_type(result)
+
+ def __unicode__(self):
+ """ Unicode representation. """
+ _maxlen = 10
+ if len(self._codes) > _maxlen:
+ result = self._tidy_repr(_maxlen)
+ elif len(self._codes) > 0:
+ result = self._get_repr(length=len(self) > _maxlen)
+ else:
+ msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
+ result = ('[], {repr_msg}'.format(repr_msg=msg))
+
+ return result
+
+ def _maybe_coerce_indexer(self, indexer):
+ """ return an indexer coerced to the codes dtype """
+ if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
+ indexer = indexer.astype(self._codes.dtype)
+ return indexer
+
+ def __getitem__(self, key):
+ """ Return an item. """
+ if isinstance(key, (int, np.integer)):
+ i = self._codes[key]
+ if i == -1:
+ return np.nan
+ else:
+ return self.categories[i]
+ else:
+ return self._constructor(values=self._codes[key],
+ categories=self.categories,
+ ordered=self.ordered, fastpath=True)
+
+ def __setitem__(self, key, value):
+ """ Item assignment.
+
+
+ Raises
+ ------
+ ValueError
+            If (one or more) value is not in categories or if an assigned
+ `Categorical` does not have the same categories
+ """
+
+ # require identical categories set
+ if isinstance(value, Categorical):
+ if not value.categories.equals(self.categories):
+ raise ValueError("Cannot set a Categorical with another, "
+ "without identical categories")
+
+ rvalue = value if is_list_like(value) else [value]
+
+ from pandas import Index
+ to_add = Index(rvalue).difference(self.categories)
+
+ # no assignments of values not in categories, but it's always ok to set
+ # something to np.nan
+ if len(to_add) and not isna(to_add).all():
+ raise ValueError("Cannot setitem on a Categorical with a new "
+ "category, set the categories first")
+
+ # set by position
+ if isinstance(key, (int, np.integer)):
+ pass
+
+ # tuple of indexers (dataframe)
+ elif isinstance(key, tuple):
+ # only allow 1 dimensional slicing, but can
+            # in a 2-d case be passed (slice(None),....)
+ if len(key) == 2:
+ if not is_null_slice(key[0]):
+ raise AssertionError("invalid slicing for a 1-ndim "
+ "categorical")
+ key = key[1]
+ elif len(key) == 1:
+ key = key[0]
+ else:
+ raise AssertionError("invalid slicing for a 1-ndim "
+ "categorical")
+
+ # slicing in Series or Categorical
+ elif isinstance(key, slice):
+ pass
+
+ # Array of True/False in Series or Categorical
+ else:
+            # There is a bug in numpy, which does not accept a Series as an
+ # indexer
+ # https://github.com/pandas-dev/pandas/issues/6168
+ # https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
+ # FIXME: remove when numpy 1.9 is the lowest numpy version pandas
+ # accepts...
+ key = np.asarray(key)
+
+ lindexer = self.categories.get_indexer(rvalue)
+
+ # FIXME: the following can be removed after GH7820 is fixed:
+ # https://github.com/pandas-dev/pandas/issues/7820
+ # float categories do currently return -1 for np.nan, even if np.nan is
+ # included in the index -> "repair" this here
+ if isna(rvalue).any() and isna(self.categories).any():
+ nan_pos = np.where(isna(self.categories))[0]
+ lindexer[lindexer == -1] = nan_pos
+
+ lindexer = self._maybe_coerce_indexer(lindexer)
+ self._codes[key] = lindexer
+
+ def _reverse_indexer(self):
+ """
+ Compute the inverse of a categorical, returning
+ a dict of categories -> indexers.
+
+ *This is an internal function*
+
+ Returns
+ -------
+ dict of categories -> indexers
+
+ Example
+ -------
+ In [1]: c = pd.Categorical(list('aabca'))
+
+ In [2]: c
+ Out[2]:
+ [a, a, b, c, a]
+ Categories (3, object): [a, b, c]
+
+ In [3]: c.categories
+ Out[3]: Index([u'a', u'b', u'c'], dtype='object')
+
+ In [4]: c.codes
+ Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
+
+ In [5]: c._reverse_indexer()
+ Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
+
+ """
+ categories = self.categories
+ r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
+ categories.size)
+ counts = counts.cumsum()
+ result = [r[counts[indexer]:counts[indexer + 1]]
+ for indexer in range(len(counts) - 1)]
+ result = dict(zip(categories, result))
+ return result
+
+ # reduction ops #
+ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
+ filter_type=None, **kwds):
+ """ perform the reduction type operation """
+ func = getattr(self, name, None)
+ if func is None:
+ msg = 'Categorical cannot perform the operation {op}'
+ raise TypeError(msg.format(op=name))
+ return func(numeric_only=numeric_only, **kwds)
+
+ def min(self, numeric_only=None, **kwargs):
+ """ The minimum value of the object.
+
+ Only ordered `Categoricals` have a minimum!
+
+ Raises
+ ------
+ TypeError
+ If the `Categorical` is not `ordered`.
+
+ Returns
+ -------
+ min : the minimum of this `Categorical`
+ """
+ self.check_for_ordered('min')
+ if numeric_only:
+ good = self._codes != -1
+ pointer = self._codes[good].min(**kwargs)
+ else:
+ pointer = self._codes.min(**kwargs)
+ if pointer == -1:
+ return np.nan
+ else:
+ return self.categories[pointer]
+
+ def max(self, numeric_only=None, **kwargs):
+ """ The maximum value of the object.
+
+ Only ordered `Categoricals` have a maximum!
+
+ Raises
+ ------
+ TypeError
+ If the `Categorical` is not `ordered`.
+
+ Returns
+ -------
+ max : the maximum of this `Categorical`
+ """
+ self.check_for_ordered('max')
+ if numeric_only:
+ good = self._codes != -1
+ pointer = self._codes[good].max(**kwargs)
+ else:
+ pointer = self._codes.max(**kwargs)
+ if pointer == -1:
+ return np.nan
+ else:
+ return self.categories[pointer]
+
+ def mode(self):
+ """
+ Returns the mode(s) of the Categorical.
+
+ Always returns `Categorical` even if only one value.
+
+ Returns
+ -------
+ modes : `Categorical` (sorted)
+ """
+
+ import pandas._libs.hashtable as htable
+ good = self._codes != -1
+ values = sorted(htable.mode_int64(_ensure_int64(self._codes[good])))
+ result = self._constructor(values=values, categories=self.categories,
+ ordered=self.ordered, fastpath=True)
+ return result
+
+ def unique(self):
+ """
+ Return the ``Categorical`` which ``categories`` and ``codes`` are
+ unique. Unused categories are NOT returned.
+
+ - unordered category: values and categories are sorted by appearance
+ order.
+ - ordered category: values are sorted by appearance order, categories
+ keeps existing order.
+
+ Returns
+ -------
+ unique values : ``Categorical``
+
+ Examples
+ --------
+ An unordered Categorical will return categories in the
+ order of appearance.
+
+ >>> pd.Categorical(list('baabc'))
+ [b, a, c]
+ Categories (3, object): [b, a, c]
+
+ >>> pd.Categorical(list('baabc'), categories=list('abc'))
+ [b, a, c]
+ Categories (3, object): [b, a, c]
+
+ An ordered Categorical preserves the category ordering.
+
+ >>> pd.Categorical(list('baabc'),
+ ... categories=list('abc'),
+ ... ordered=True)
+ [b, a, c]
+ Categories (3, object): [a < b < c]
+
+ See Also
+ --------
+ unique
+ CategoricalIndex.unique
+ Series.unique
+
+ """
+
+ # unlike np.unique, unique1d does not sort
+ unique_codes = unique1d(self.codes)
+ cat = self.copy()
+
+ # keep nan in codes
+ cat._codes = unique_codes
+
+ # exclude nan from indexer for categories
+ take_codes = unique_codes[unique_codes != -1]
+ if self.ordered:
+ take_codes = sorted(take_codes)
+ return cat.set_categories(cat.categories.take(take_codes))
+
+ def equals(self, other):
+ """
+ Returns True if categorical arrays are equal.
+
+ Parameters
+ ----------
+ other : `Categorical`
+
+ Returns
+ -------
+ are_equal : boolean
+ """
+ if self.is_dtype_equal(other):
+ if self.categories.equals(other.categories):
+ # fastpath to avoid re-coding
+ other_codes = other._codes
+ else:
+ other_codes = _recode_for_categories(other.codes,
+ other.categories,
+ self.categories)
+ return np.array_equal(self._codes, other_codes)
+ return False
+
+ def is_dtype_equal(self, other):
+ """
+ Returns True if categoricals are the same dtype
+ same categories, and same ordered
+
+ Parameters
+ ----------
+ other : Categorical
+
+ Returns
+ -------
+ are_equal : boolean
+ """
+
+ try:
+ return hash(self.dtype) == hash(other.dtype)
+ except (AttributeError, TypeError):
+ return False
+
+ def describe(self):
+ """ Describes this Categorical
+
+ Returns
+ -------
+ description: `DataFrame`
+ A dataframe with frequency and counts by category.
+ """
+ counts = self.value_counts(dropna=False)
+ freqs = counts / float(counts.sum())
+
+ from pandas.core.reshape.concat import concat
+ result = concat([counts, freqs], axis=1)
+ result.columns = ['counts', 'freqs']
+ result.index.name = 'categories'
+
+ return result
+
+ def repeat(self, repeats, *args, **kwargs):
+ """
+ Repeat elements of a Categorical.
+
+ See also
+ --------
+ numpy.ndarray.repeat
+
+ """
+ nv.validate_repeat(args, kwargs)
+ codes = self._codes.repeat(repeats)
+ return self._constructor(values=codes, categories=self.categories,
+ ordered=self.ordered, fastpath=True)
+
+# The Series.cat accessor
+
+
+class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
+ """
+ Accessor object for categorical properties of the Series values.
+
+    Be aware that assigning to `categories` is an inplace operation, while all
+ methods return new categorical data per default (but can be called with
+ `inplace=True`).
+
+ Parameters
+ ----------
+ data : Series or CategoricalIndex
+
+ Examples
+ --------
+ >>> s.cat.categories
+ >>> s.cat.categories = list('abc')
+ >>> s.cat.rename_categories(list('cab'))
+ >>> s.cat.reorder_categories(list('cab'))
+ >>> s.cat.add_categories(['d','e'])
+ >>> s.cat.remove_categories(['d'])
+ >>> s.cat.remove_unused_categories()
+ >>> s.cat.set_categories(list('abcde'))
+ >>> s.cat.as_ordered()
+ >>> s.cat.as_unordered()
+
+ """
+
+ def __init__(self, data):
+ self._validate(data)
+ self.categorical = data.values
+ self.index = data.index
+ self.name = data.name
+ self._freeze()
+
+ @staticmethod
+ def _validate(data):
+ if not is_categorical_dtype(data.dtype):
+ raise AttributeError("Can only use .cat accessor with a "
+ "'category' dtype")
+
+ def _delegate_property_get(self, name):
+ return getattr(self.categorical, name)
+
+ def _delegate_property_set(self, name, new_values):
+ return setattr(self.categorical, name, new_values)
+
+ @property
+ def codes(self):
+ from pandas import Series
+ return Series(self.categorical.codes, index=self.index)
+
+ def _delegate_method(self, name, *args, **kwargs):
+ from pandas import Series
+ method = getattr(self.categorical, name)
+ res = method(*args, **kwargs)
+ if res is not None:
+ return Series(res, index=self.index, name=self.name)
+
+
+CategoricalAccessor._add_delegate_accessors(delegate=Categorical,
+ accessors=["categories",
+ "ordered"],
+ typ='property')
+CategoricalAccessor._add_delegate_accessors(delegate=Categorical, accessors=[
+ "rename_categories", "reorder_categories", "add_categories",
+ "remove_categories", "remove_unused_categories", "set_categories",
+ "as_ordered", "as_unordered"], typ='method')
+
+# utility routines
+
+
+def _get_codes_for_values(values, categories):
+ """
+ utility routine to turn values into codes given the specified categories
+ """
+
+ from pandas.core.algorithms import _get_data_algo, _hashtables
+ if not is_dtype_equal(values.dtype, categories.dtype):
+ values = _ensure_object(values)
+ categories = _ensure_object(categories)
+
+ (hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
+ (_, _), cats = _get_data_algo(categories, _hashtables)
+ t = hash_klass(len(cats))
+ t.map_locations(cats)
+ return coerce_indexer_dtype(t.lookup(vals), cats)
+
+
+def _recode_for_categories(codes, old_categories, new_categories):
+ """
+    Convert a set of codes to a new set of categories
+
+ Parameters
+ ----------
+ codes : array
+ old_categories, new_categories : Index
+
+ Returns
+ -------
+ new_codes : array
+
+ Examples
+ --------
+ >>> old_cat = pd.Index(['b', 'a', 'c'])
+ >>> new_cat = pd.Index(['a', 'b'])
+ >>> codes = np.array([0, 1, 1, 2])
+ >>> _recode_for_categories(codes, old_cat, new_cat)
+ array([ 1, 0, 0, -1])
+ """
+ from pandas.core.algorithms import take_1d
+
+ if len(old_categories) == 0:
+ # All null anyway, so just retain the nulls
+ return codes.copy()
+ indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
+ new_categories)
+ new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
+ return new_codes
+
+
+def _convert_to_list_like(list_like):
+ if hasattr(list_like, "dtype"):
+ return list_like
+ if isinstance(list_like, list):
+ return list_like
+ if (is_sequence(list_like) or isinstance(list_like, tuple) or
+ isinstance(list_like, types.GeneratorType)):
+ return list(list_like)
+ elif is_scalar(list_like):
+ return [list_like]
+ else:
+ # is this reached?
+ return [list_like]
+
+
+def _factorize_from_iterable(values):
+ """
+ Factorize an input `values` into `categories` and `codes`. Preserves
+ categorical dtype in `categories`.
+
+ *This is an internal function*
+
+ Parameters
+ ----------
+ values : list-like
+
+ Returns
+ -------
+ codes : ndarray
+ categories : Index
+ If `values` has a categorical dtype, then `categories` is
+ a CategoricalIndex keeping the categories and order of `values`.
+ """
+ from pandas.core.indexes.category import CategoricalIndex
+
+ if not is_list_like(values):
+ raise TypeError("Input must be list-like")
+
+ if is_categorical(values):
+ if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
+ values = values._values
+ categories = CategoricalIndex(values.categories,
+ categories=values.categories,
+ ordered=values.ordered)
+ codes = values.codes
+ else:
+ cat = Categorical(values, ordered=True)
+ categories = cat.categories
+ codes = cat.codes
+ return codes, categories
+
+
+def _factorize_from_iterables(iterables):
+ """
+ A higher-level wrapper over `_factorize_from_iterable`.
+
+ *This is an internal function*
+
+ Parameters
+ ----------
+ iterables : list-like of list-likes
+
+ Returns
+ -------
+ codes_list : list of ndarrays
+ categories_list : list of Indexes
+
+ Notes
+ -----
+ See `_factorize_from_iterable` for more info.
+ """
+ if len(iterables) == 0:
+ # For consistency, it should return a list of 2 lists.
+ return [[], []]
+ return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 7b11e37a14b51..17435dfc48bde 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -1,2331 +1,7 @@
-# pylint: disable=E1101,W0232
+import warnings
-import numpy as np
-from warnings import warn
-import types
+# TODO: Remove after 0.23.x
+warnings.warn("'pandas.core' is private. Use 'pandas.Categorical'",
+ FutureWarning, stacklevel=2)
-from pandas import compat
-from pandas.compat import u, lzip
-from pandas._libs import lib, algos as libalgos
-
-from pandas.core.dtypes.generic import (
- ABCSeries, ABCIndexClass, ABCCategoricalIndex)
-from pandas.core.dtypes.missing import isna, notna
-from pandas.core.dtypes.cast import (
- maybe_infer_to_datetimelike,
- coerce_indexer_dtype)
-from pandas.core.dtypes.dtypes import CategoricalDtype
-from pandas.core.dtypes.common import (
- _ensure_int64,
- _ensure_object,
- _ensure_platform_int,
- is_dtype_equal,
- is_datetimelike,
- is_datetime64_dtype,
- is_timedelta64_dtype,
- is_categorical,
- is_categorical_dtype,
- is_list_like, is_sequence,
- is_scalar,
- is_dict_like)
-from pandas.core.common import is_null_slice, _maybe_box_datetimelike
-
-from pandas.core.algorithms import factorize, take_1d, unique1d
-from pandas.core.accessor import PandasDelegate
-from pandas.core.base import (PandasObject,
- NoNewAttributesMixin, _shared_docs)
-import pandas.core.common as com
-from pandas.core.missing import interpolate_2d
-from pandas.compat.numpy import function as nv
-from pandas.util._decorators import (
- Appender, cache_readonly, deprecate_kwarg, Substitution)
-
-from pandas.io.formats.terminal import get_terminal_size
-from pandas.util._validators import validate_bool_kwarg
-from pandas.core.config import get_option
-
-
-def _cat_compare_op(op):
- def f(self, other):
- # On python2, you can usually compare any type to any type, and
- # Categoricals can be seen as a custom type, but having different
- # results depending whether categories are the same or not is kind of
- # insane, so be a bit stricter here and use the python3 idea of
- # comparing only things of equal type.
- if not self.ordered:
- if op in ['__lt__', '__gt__', '__le__', '__ge__']:
- raise TypeError("Unordered Categoricals can only compare "
- "equality or not")
- if isinstance(other, Categorical):
- # Two Categoricals can only be be compared if the categories are
- # the same (maybe up to ordering, depending on ordered)
-
- msg = ("Categoricals can only be compared if "
- "'categories' are the same.")
- if len(self.categories) != len(other.categories):
- raise TypeError(msg + " Categories are different lengths")
- elif (self.ordered and not (self.categories ==
- other.categories).all()):
- raise TypeError(msg)
- elif not set(self.categories) == set(other.categories):
- raise TypeError(msg)
-
- if not (self.ordered == other.ordered):
- raise TypeError("Categoricals can only be compared if "
- "'ordered' is the same")
- if not self.ordered and not self.categories.equals(
- other.categories):
- # both unordered and different order
- other_codes = _get_codes_for_values(other, self.categories)
- else:
- other_codes = other._codes
-
- na_mask = (self._codes == -1) | (other_codes == -1)
- f = getattr(self._codes, op)
- ret = f(other_codes)
- if na_mask.any():
- # In other series, the leads to False, so do that here too
- ret[na_mask] = False
- return ret
-
- # Numpy-1.9 and earlier may convert a scalar to a zerodim array during
- # comparison operation when second arg has higher priority, e.g.
- #
- # cat[0] < cat
- #
- # With cat[0], for example, being ``np.int64(1)`` by the time it gets
- # into this function would become ``np.array(1)``.
- other = lib.item_from_zerodim(other)
- if is_scalar(other):
- if other in self.categories:
- i = self.categories.get_loc(other)
- return getattr(self._codes, op)(i)
- else:
- if op == '__eq__':
- return np.repeat(False, len(self))
- elif op == '__ne__':
- return np.repeat(True, len(self))
- else:
- msg = ("Cannot compare a Categorical for op {op} with a "
- "scalar, which is not a category.")
- raise TypeError(msg.format(op=op))
- else:
-
- # allow categorical vs object dtype array comparisons for equality
- # these are only positional comparisons
- if op in ['__eq__', '__ne__']:
- return getattr(np.array(self), op)(np.array(other))
-
- msg = ("Cannot compare a Categorical for op {op} with type {typ}."
- "\nIf you want to compare values, use 'np.asarray(cat) "
- "<op> other'.")
- raise TypeError(msg.format(op=op, typ=type(other)))
-
- f.__name__ = op
-
- return f
-
-
-def _maybe_to_categorical(array):
- """
- Coerce to a categorical if a series is given.
-
- Internal use ONLY.
- """
- if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
- return array._values
- elif isinstance(array, np.ndarray):
- return Categorical(array)
- return array
-
-
-_codes_doc = """The category codes of this categorical.
-
-Level codes are an array if integer which are the positions of the real
-values in the categories array.
-
-There is not setter, use the other categorical methods and the normal item
-setter to change values in the categorical.
-"""
-
-
-class Categorical(PandasObject):
- """
- Represents a categorical variable in classic R / S-plus fashion
-
- `Categoricals` can only take on only a limited, and usually fixed, number
- of possible values (`categories`). In contrast to statistical categorical
- variables, a `Categorical` might have an order, but numerical operations
- (additions, divisions, ...) are not possible.
-
- All values of the `Categorical` are either in `categories` or `np.nan`.
- Assigning values outside of `categories` will raise a `ValueError`. Order
- is defined by the order of the `categories`, not lexical order of the
- values.
-
- Parameters
- ----------
- values : list-like
- The values of the categorical. If categories are given, values not in
- categories will be replaced with NaN.
- categories : Index-like (unique), optional
- The unique categories for this categorical. If not given, the
- categories are assumed to be the unique values of values.
- ordered : boolean, (default False)
- Whether or not this categorical is treated as a ordered categorical.
- If not given, the resulting categorical will not be ordered.
- dtype : CategoricalDtype
- An instance of ``CategoricalDtype`` to use for this categorical
-
- .. versionadded:: 0.21.0
-
- Attributes
- ----------
- categories : Index
- The categories of this categorical
- codes : ndarray
- The codes (integer positions, which point to the categories) of this
- categorical, read only.
- ordered : boolean
- Whether or not this Categorical is ordered.
- dtype : CategoricalDtype
- The instance of ``CategoricalDtype`` storing the ``categories``
- and ``ordered``.
-
- .. versionadded:: 0.21.0
-
- Methods
- -------
- from_codes
- __array__
-
- Raises
- ------
- ValueError
- If the categories do not validate.
- TypeError
- If an explicit ``ordered=True`` is given but no `categories` and the
- `values` are not sortable.
-
- Examples
- --------
- >>> pd.Categorical([1, 2, 3, 1, 2, 3])
- [1, 2, 3, 1, 2, 3]
- Categories (3, int64): [1, 2, 3]
-
- >>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
- [a, b, c, a, b, c]
- Categories (3, object): [a, b, c]
-
- Ordered `Categoricals` can be sorted according to the custom order
- of the categories and can have a min and max value.
-
- >>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
- ... categories=['c', 'b', 'a'])
- >>> c
- [a, b, c, a, b, c]
- Categories (3, object): [c < b < a]
- >>> c.min()
- 'c'
-
- Notes
- -----
- See the `user guide
- <http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
-
- See also
- --------
- pandas.api.types.CategoricalDtype : Type for categorical data
- CategoricalIndex : An Index with an underlying ``Categorical``
- """
-
- # For comparisons, so that numpy uses our implementation if the compare
- # ops, which raise
- __array_priority__ = 1000
- _dtype = CategoricalDtype()
- _deprecations = frozenset(['labels'])
- _typ = 'categorical'
-
- def __init__(self, values, categories=None, ordered=None, dtype=None,
- fastpath=False):
-
- # Ways of specifying the dtype (prioritized ordered)
- # 1. dtype is a CategoricalDtype
- # a.) with known categories, use dtype.categories
- # b.) else with Categorical values, use values.dtype
- # c.) else, infer from values
- # d.) specifying dtype=CategoricalDtype and categories is an error
- # 2. dtype is a string 'category'
- # a.) use categories, ordered
- # b.) use values.dtype
- # c.) infer from values
- # 3. dtype is None
- # a.) use categories, ordered
- # b.) use values.dtype
- # c.) infer from values
-
- if dtype is not None:
- # The dtype argument takes precedence over values.dtype (if any)
- if isinstance(dtype, compat.string_types):
- if dtype == 'category':
- dtype = CategoricalDtype(categories, ordered)
- else:
- msg = "Unknown `dtype` {dtype}"
- raise ValueError(msg.format(dtype=dtype))
- elif categories is not None or ordered is not None:
- raise ValueError("Cannot specify both `dtype` and `categories`"
- " or `ordered`.")
-
- categories = dtype.categories
- ordered = dtype.ordered
-
- elif is_categorical(values):
- # If no "dtype" was passed, use the one from "values", but honor
- # the "ordered" and "categories" arguments
- dtype = values.dtype._from_categorical_dtype(values.dtype,
- categories, ordered)
- else:
- # If dtype=None and values is not categorical, create a new dtype
- dtype = CategoricalDtype(categories, ordered)
-
- # At this point, dtype is always a CategoricalDtype
- # if dtype.categories is None, we are inferring
-
- if fastpath:
- self._codes = coerce_indexer_dtype(values, categories)
- self._dtype = dtype
- return
-
- # null_mask indicates missing values we want to exclude from inference.
- # This means: only missing values in list-likes (not arrays/ndframes).
- null_mask = np.array(False)
-
- # sanitize input
- if is_categorical_dtype(values):
- if dtype.categories is None:
- dtype = CategoricalDtype(values.categories, dtype.ordered)
-
- elif not isinstance(values, (ABCIndexClass, ABCSeries)):
- # _sanitize_array coerces np.nan to a string under certain versions
- # of numpy
- values = maybe_infer_to_datetimelike(values, convert_dates=True)
- if not isinstance(values, np.ndarray):
- values = _convert_to_list_like(values)
- from pandas.core.series import _sanitize_array
- # By convention, empty lists result in object dtype:
- if len(values) == 0:
- sanitize_dtype = 'object'
- else:
- sanitize_dtype = None
- null_mask = isna(values)
- if null_mask.any():
- values = [values[idx] for idx in np.where(~null_mask)[0]]
- values = _sanitize_array(values, None, dtype=sanitize_dtype)
-
- if dtype.categories is None:
- try:
- codes, categories = factorize(values, sort=True)
- except TypeError:
- codes, categories = factorize(values, sort=False)
- if dtype.ordered:
- # raise, as we don't have a sortable data structure and so
- # the user should give us one by specifying categories
- raise TypeError("'values' is not ordered, please "
- "explicitly specify the categories order "
- "by passing in a categories argument.")
- except ValueError:
-
- # FIXME
- raise NotImplementedError("> 1 ndim Categorical are not "
- "supported at this time")
-
- # we're inferring from values
- dtype = CategoricalDtype(categories, dtype.ordered)
-
- elif is_categorical_dtype(values):
- old_codes = (values.cat.codes if isinstance(values, ABCSeries)
- else values.codes)
- codes = _recode_for_categories(old_codes, values.dtype.categories,
- dtype.categories)
-
- else:
- codes = _get_codes_for_values(values, dtype.categories)
-
- if null_mask.any():
- # Reinsert -1 placeholders for previously removed missing values
- full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
- full_codes[~null_mask] = codes
- codes = full_codes
-
- self._dtype = dtype
- self._codes = coerce_indexer_dtype(codes, dtype.categories)
-
- @property
- def categories(self):
- """The categories of this categorical.
-
- Setting assigns new values to each category (effectively a rename of
- each individual category).
-
- The assigned value has to be a list-like object. All items must be
- unique and the number of items in the new categories must be the same
- as the number of items in the old categories.
-
- Assigning to `categories` is a inplace operation!
-
- Raises
- ------
- ValueError
- If the new categories do not validate as categories or if the
- number of new categories is unequal the number of old categories
-
- See also
- --------
- rename_categories
- reorder_categories
- add_categories
- remove_categories
- remove_unused_categories
- set_categories
- """
- return self.dtype.categories
-
- @categories.setter
- def categories(self, categories):
- new_dtype = CategoricalDtype(categories, ordered=self.ordered)
- if (self.dtype.categories is not None and
- len(self.dtype.categories) != len(new_dtype.categories)):
- raise ValueError("new categories need to have the same number of "
- "items as the old categories!")
- self._dtype = new_dtype
-
- @property
- def ordered(self):
- """Whether the categories have an ordered relationship"""
- return self.dtype.ordered
-
- @property
- def dtype(self):
- """The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
- return self._dtype
-
- @property
- def _constructor(self):
- return Categorical
-
- def copy(self):
- """ Copy constructor. """
- return self._constructor(values=self._codes.copy(),
- categories=self.categories,
- ordered=self.ordered,
- fastpath=True)
-
- def astype(self, dtype, copy=True):
- """
- Coerce this type to another dtype
-
- Parameters
- ----------
- dtype : numpy dtype or pandas type
- copy : bool, default True
- By default, astype always returns a newly allocated object.
- If copy is set to False and dtype is categorical, the original
- object is returned.
-
- .. versionadded:: 0.19.0
-
- """
- if is_categorical_dtype(dtype):
- # GH 10696/18593
- dtype = self.dtype._update_dtype(dtype)
- self = self.copy() if copy else self
- if dtype == self.dtype:
- return self
- return self._set_dtype(dtype)
- return np.array(self, dtype=dtype, copy=copy)
-
- @cache_readonly
- def ndim(self):
- """Number of dimensions of the Categorical """
- return self._codes.ndim
-
- @cache_readonly
- def size(self):
- """ return the len of myself """
- return len(self)
-
- @cache_readonly
- def itemsize(self):
- """ return the size of a single category """
- return self.categories.itemsize
-
- def tolist(self):
- """
- Return a list of the values.
-
- These are each a scalar type, which is a Python scalar
- (for str, int, float) or a pandas scalar
- (for Timestamp/Timedelta/Interval/Period)
- """
- if is_datetimelike(self.categories):
- return [_maybe_box_datetimelike(x) for x in self]
- return np.array(self).tolist()
-
- @property
- def base(self):
- """ compat, we are always our own object """
- return None
-
- @classmethod
- def _from_inferred_categories(cls, inferred_categories, inferred_codes,
- dtype):
- """Construct a Categorical from inferred values
-
- For inferred categories (`dtype` is None) the categories are sorted.
- For explicit `dtype`, the `inferred_categories` are cast to the
- appropriate type.
-
- Parameters
- ----------
-
- inferred_categories : Index
- inferred_codes : Index
- dtype : CategoricalDtype or 'category'
-
- Returns
- -------
- Categorical
- """
- from pandas import Index, to_numeric, to_datetime, to_timedelta
-
- cats = Index(inferred_categories)
-
- known_categories = (isinstance(dtype, CategoricalDtype) and
- dtype.categories is not None)
-
- if known_categories:
- # Convert to a specialzed type with `dtype` if specified
- if dtype.categories.is_numeric():
- cats = to_numeric(inferred_categories, errors='coerce')
- elif is_datetime64_dtype(dtype.categories):
- cats = to_datetime(inferred_categories, errors='coerce')
- elif is_timedelta64_dtype(dtype.categories):
- cats = to_timedelta(inferred_categories, errors='coerce')
-
- if known_categories:
- # recode from observation oder to dtype.categories order
- categories = dtype.categories
- codes = _recode_for_categories(inferred_codes, cats, categories)
- elif not cats.is_monotonic_increasing:
- # sort categories and recode for unknown categories
- unsorted = cats.copy()
- categories = cats.sort_values()
- codes = _recode_for_categories(inferred_codes, unsorted,
- categories)
- dtype = CategoricalDtype(categories, ordered=False)
- else:
- dtype = CategoricalDtype(cats, ordered=False)
- codes = inferred_codes
-
- return cls(codes, dtype=dtype, fastpath=True)
-
- @classmethod
- def from_codes(cls, codes, categories, ordered=False):
- """
- Make a Categorical type from codes and categories arrays.
-
- This constructor is useful if you already have codes and categories and
- so do not need the (computation intensive) factorization step, which is
- usually done on the constructor.
-
- If your data does not follow this convention, please use the normal
- constructor.
-
- Parameters
- ----------
- codes : array-like, integers
- An integer array, where each integer points to a category in
- categories or -1 for NaN
- categories : index-like
- The categories for the categorical. Items need to be unique.
- ordered : boolean, (default False)
- Whether or not this categorical is treated as a ordered
- categorical. If not given, the resulting categorical will be
- unordered.
- """
- try:
- codes = np.asarray(codes, np.int64)
- except:
- raise ValueError(
- "codes need to be convertible to an arrays of integers")
-
- categories = CategoricalDtype._validate_categories(categories)
-
- if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
- raise ValueError("codes need to be between -1 and "
- "len(categories)-1")
-
- return cls(codes, categories=categories, ordered=ordered,
- fastpath=True)
-
- _codes = None
-
- def _get_codes(self):
- """ Get the codes.
-
- Returns
- -------
- codes : integer array view
- A non writable view of the `codes` array.
- """
- v = self._codes.view()
- v.flags.writeable = False
- return v
-
- def _set_codes(self, codes):
- """
- Not settable by the user directly
- """
- raise ValueError("cannot set Categorical codes directly")
-
- codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
-
- def _set_categories(self, categories, fastpath=False):
- """ Sets new categories inplace
-
- Parameters
- ----------
- fastpath : boolean (default: False)
- Don't perform validation of the categories for uniqueness or nulls
-
- Examples
- --------
- >>> c = Categorical(['a', 'b'])
- >>> c
- [a, b]
- Categories (2, object): [a, b]
-
- >>> c._set_categories(pd.Index(['a', 'c']))
- >>> c
- [a, c]
- Categories (2, object): [a, c]
- """
-
- if fastpath:
- new_dtype = CategoricalDtype._from_fastpath(categories,
- self.ordered)
- else:
- new_dtype = CategoricalDtype(categories, ordered=self.ordered)
- if (not fastpath and self.dtype.categories is not None and
- len(new_dtype.categories) != len(self.dtype.categories)):
- raise ValueError("new categories need to have the same number of "
- "items than the old categories!")
-
- self._dtype = new_dtype
-
- def _codes_for_groupby(self, sort):
- """
- If sort=False, return a copy of self, coded with categories as
- returned by .unique(), followed by any categories not appearing in
- the data. If sort=True, return self.
-
- This method is needed solely to ensure the categorical index of the
- GroupBy result has categories in the order of appearance in the data
- (GH-8868).
-
- Parameters
- ----------
- sort : boolean
- The value of the sort parameter groupby was called with.
-
- Returns
- -------
- Categorical
- If sort=False, the new categories are set to the order of
- appearance in codes (unless ordered=True, in which case the
- original order is preserved), followed by any unrepresented
- categories in the original order.
- """
-
- # Already sorted according to self.categories; all is fine
- if sort:
- return self
-
- # sort=False should order groups in as-encountered order (GH-8868)
- cat = self.unique()
-
- # But for groupby to work, all categories should be present,
- # including those missing from the data (GH-13179), which .unique()
- # above dropped
- cat.add_categories(
- self.categories[~self.categories.isin(cat.categories)],
- inplace=True)
-
- return self.reorder_categories(cat.categories)
-
- def _set_dtype(self, dtype):
- """Internal method for directly updating the CategoricalDtype
-
- Parameters
- ----------
- dtype : CategoricalDtype
-
- Notes
- -----
- We don't do any validation here. It's assumed that the dtype is
- a (valid) instance of `CategoricalDtype`.
- """
- codes = _recode_for_categories(self.codes, self.categories,
- dtype.categories)
- return type(self)(codes, dtype=dtype, fastpath=True)
-
- def set_ordered(self, value, inplace=False):
- """
- Sets the ordered attribute to the boolean value
-
- Parameters
- ----------
- value : boolean to set whether this categorical is ordered (True) or
- not (False)
- inplace : boolean (default: False)
- Whether or not to set the ordered attribute inplace or return a copy
- of this categorical with ordered set to the value
- """
- inplace = validate_bool_kwarg(inplace, 'inplace')
- new_dtype = CategoricalDtype(self.categories, ordered=value)
- cat = self if inplace else self.copy()
- cat._dtype = new_dtype
- if not inplace:
- return cat
-
- def as_ordered(self, inplace=False):
- """
- Sets the Categorical to be ordered
-
- Parameters
- ----------
- inplace : boolean (default: False)
- Whether or not to set the ordered attribute inplace or return a copy
- of this categorical with ordered set to True
- """
- inplace = validate_bool_kwarg(inplace, 'inplace')
- return self.set_ordered(True, inplace=inplace)
-
- def as_unordered(self, inplace=False):
- """
- Sets the Categorical to be unordered
-
- Parameters
- ----------
- inplace : boolean (default: False)
- Whether or not to set the ordered attribute inplace or return a copy
- of this categorical with ordered set to False
- """
- inplace = validate_bool_kwarg(inplace, 'inplace')
- return self.set_ordered(False, inplace=inplace)
-
- def set_categories(self, new_categories, ordered=None, rename=False,
- inplace=False):
- """ Sets the categories to the specified new_categories.
-
- `new_categories` can include new categories (which will result in
- unused categories) or remove old categories (which results in values
- set to NaN). If `rename==True`, the categories will simple be renamed
- (less or more items than in old categories will result in values set to
- NaN or in unused categories respectively).
-
- This method can be used to perform more than one action of adding,
- removing, and reordering simultaneously and is therefore faster than
- performing the individual steps via the more specialised methods.
-
- On the other hand this methods does not do checks (e.g., whether the
- old categories are included in the new categories on a reorder), which
- can result in surprising changes, for example when using special string
- dtypes on python3, which does not considers a S1 string equal to a
- single char python string.
-
- Raises
- ------
- ValueError
- If new_categories does not validate as categories
-
- Parameters
- ----------
- new_categories : Index-like
- The categories in new order.
- ordered : boolean, (default: False)
- Whether or not the categorical is treated as a ordered categorical.
- If not given, do not change the ordered information.
- rename : boolean (default: False)
- Whether or not the new_categories should be considered as a rename
- of the old categories or as reordered categories.
- inplace : boolean (default: False)
- Whether or not to reorder the categories inplace or return a copy of
- this categorical with reordered categories.
-
- Returns
- -------
- cat : Categorical with reordered categories or None if inplace.
-
- See also
- --------
- rename_categories
- reorder_categories
- add_categories
- remove_categories
- remove_unused_categories
- """
- inplace = validate_bool_kwarg(inplace, 'inplace')
- if ordered is None:
- ordered = self.dtype.ordered
- new_dtype = CategoricalDtype(new_categories, ordered=ordered)
-
- cat = self if inplace else self.copy()
- if rename:
- if (cat.dtype.categories is not None and
- len(new_dtype.categories) < len(cat.dtype.categories)):
- # remove all _codes which are larger and set to -1/NaN
- self._codes[self._codes >= len(new_dtype.categories)] = -1
- else:
- codes = _recode_for_categories(self.codes, self.categories,
- new_dtype.categories)
- cat._codes = codes
- cat._dtype = new_dtype
-
- if not inplace:
- return cat
-
- def rename_categories(self, new_categories, inplace=False):
- """ Renames categories.
-
- Raises
- ------
- ValueError
- If new categories are list-like and do not have the same number of
- items than the current categories or do not validate as categories
-
- Parameters
- ----------
- new_categories : list-like, dict-like or callable
-
- * list-like: all items must be unique and the number of items in
- the new categories must match the existing number of categories.
-
- * dict-like: specifies a mapping from
- old categories to new. Categories not contained in the mapping
- are passed through and extra categories in the mapping are
- ignored.
-
- .. versionadded:: 0.21.0
-
- * callable : a callable that is called on all items in the old
- categories and whose return values comprise the new categories.
-
- .. versionadded:: 0.23.0
-
- .. warning::
-
- Currently, Series are considered list like. In a future version
- of pandas they'll be considered dict-like.
-
- inplace : boolean (default: False)
- Whether or not to rename the categories inplace or return a copy of
- this categorical with renamed categories.
-
- Returns
- -------
- cat : Categorical or None
- With ``inplace=False``, the new categorical is returned.
- With ``inplace=True``, there is no return value.
-
- See also
- --------
- reorder_categories
- add_categories
- remove_categories
- remove_unused_categories
- set_categories
-
- Examples
- --------
- >>> c = Categorical(['a', 'a', 'b'])
- >>> c.rename_categories([0, 1])
- [0, 0, 1]
- Categories (2, int64): [0, 1]
-
- For dict-like ``new_categories``, extra keys are ignored and
- categories not in the dictionary are passed through
-
- >>> c.rename_categories({'a': 'A', 'c': 'C'})
- [A, A, b]
- Categories (2, object): [A, b]
-
- You may also provide a callable to create the new categories
-
- >>> c.rename_categories(lambda x: x.upper())
- [A, A, B]
- Categories (2, object): [A, B]
- """
- inplace = validate_bool_kwarg(inplace, 'inplace')
- cat = self if inplace else self.copy()
-
- if isinstance(new_categories, ABCSeries):
- msg = ("Treating Series 'new_categories' as a list-like and using "
- "the values. In a future version, 'rename_categories' will "
- "treat Series like a dictionary.\n"
- "For dict-like, use 'new_categories.to_dict()'\n"
- "For list-like, use 'new_categories.values'.")
- warn(msg, FutureWarning, stacklevel=2)
- new_categories = list(new_categories)
-
- if is_dict_like(new_categories):
- cat.categories = [new_categories.get(item, item)
- for item in cat.categories]
- elif callable(new_categories):
- cat.categories = [new_categories(item) for item in cat.categories]
- else:
- cat.categories = new_categories
- if not inplace:
- return cat
-
- def reorder_categories(self, new_categories, ordered=None, inplace=False):
- """ Reorders categories as specified in new_categories.
-
- `new_categories` need to include all old categories and no new category
- items.
-
- Raises
- ------
- ValueError
- If the new categories do not contain all old category items or any
- new ones
-
- Parameters
- ----------
- new_categories : Index-like
- The categories in new order.
- ordered : boolean, optional
- Whether or not the categorical is treated as a ordered categorical.
- If not given, do not change the ordered information.
- inplace : boolean (default: False)
- Whether or not to reorder the categories inplace or return a copy of
- this categorical with reordered categories.
-
- Returns
- -------
- cat : Categorical with reordered categories or None if inplace.
-
- See also
- --------
- rename_categories
- add_categories
- remove_categories
- remove_unused_categories
- set_categories
- """
- inplace = validate_bool_kwarg(inplace, 'inplace')
- if set(self.dtype.categories) != set(new_categories):
- raise ValueError("items in new_categories are not the same as in "
- "old categories")
- return self.set_categories(new_categories, ordered=ordered,
- inplace=inplace)
-
- def add_categories(self, new_categories, inplace=False):
- """ Add new categories.
-
- `new_categories` will be included at the last/highest place in the
- categories and will be unused directly after this call.
-
- Raises
- ------
- ValueError
- If the new categories include old categories or do not validate as
- categories
-
- Parameters
- ----------
- new_categories : category or list-like of category
- The new categories to be included.
- inplace : boolean (default: False)
- Whether or not to add the categories inplace or return a copy of
- this categorical with added categories.
-
- Returns
- -------
- cat : Categorical with new categories added or None if inplace.
-
- See also
- --------
- rename_categories
- reorder_categories
- remove_categories
- remove_unused_categories
- set_categories
- """
- inplace = validate_bool_kwarg(inplace, 'inplace')
- if not is_list_like(new_categories):
- new_categories = [new_categories]
- already_included = set(new_categories) & set(self.dtype.categories)
- if len(already_included) != 0:
- msg = ("new categories must not include old categories: "
- "{already_included!s}")
- raise ValueError(msg.format(already_included=already_included))
- new_categories = list(self.dtype.categories) + list(new_categories)
- new_dtype = CategoricalDtype(new_categories, self.ordered)
-
- cat = self if inplace else self.copy()
- cat._dtype = new_dtype
- cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
- if not inplace:
- return cat
-
- def remove_categories(self, removals, inplace=False):
- """ Removes the specified categories.
-
- `removals` must be included in the old categories. Values which were in
- the removed categories will be set to NaN
-
- Raises
- ------
- ValueError
- If the removals are not contained in the categories
-
- Parameters
- ----------
- removals : category or list of categories
- The categories which should be removed.
- inplace : boolean (default: False)
- Whether or not to remove the categories inplace or return a copy of
- this categorical with removed categories.
-
- Returns
- -------
- cat : Categorical with removed categories or None if inplace.
-
- See also
- --------
- rename_categories
- reorder_categories
- add_categories
- remove_unused_categories
- set_categories
- """
- inplace = validate_bool_kwarg(inplace, 'inplace')
- if not is_list_like(removals):
- removals = [removals]
-
- removal_set = set(list(removals))
- not_included = removal_set - set(self.dtype.categories)
- new_categories = [c for c in self.dtype.categories
- if c not in removal_set]
-
- # GH 10156
- if any(isna(removals)):
- not_included = [x for x in not_included if notna(x)]
- new_categories = [x for x in new_categories if notna(x)]
-
- if len(not_included) != 0:
- msg = "removals must all be in old categories: {not_included!s}"
- raise ValueError(msg.format(not_included=not_included))
-
- return self.set_categories(new_categories, ordered=self.ordered,
- rename=False, inplace=inplace)
-
- def remove_unused_categories(self, inplace=False):
- """ Removes categories which are not used.
-
- Parameters
- ----------
- inplace : boolean (default: False)
- Whether or not to drop unused categories inplace or return a copy of
- this categorical with unused categories dropped.
-
- Returns
- -------
- cat : Categorical with unused categories dropped or None if inplace.
-
- See also
- --------
- rename_categories
- reorder_categories
- add_categories
- remove_categories
- set_categories
- """
- inplace = validate_bool_kwarg(inplace, 'inplace')
- cat = self if inplace else self.copy()
- idx, inv = np.unique(cat._codes, return_inverse=True)
-
- if idx.size != 0 and idx[0] == -1: # na sentinel
- idx, inv = idx[1:], inv - 1
-
- new_categories = cat.dtype.categories.take(idx)
- new_dtype = CategoricalDtype._from_fastpath(new_categories,
- ordered=self.ordered)
- cat._dtype = new_dtype
- cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
-
- if not inplace:
- return cat
-
- def map(self, mapper):
- """Apply mapper function to its categories (not codes).
-
- Parameters
- ----------
- mapper : callable
- Function to be applied. When all categories are mapped
- to different categories, the result will be Categorical which has
- the same order property as the original. Otherwise, the result will
- be np.ndarray.
-
- Returns
- -------
- applied : Categorical or Index.
-
- """
- new_categories = self.categories.map(mapper)
- try:
- return self.from_codes(self._codes.copy(),
- categories=new_categories,
- ordered=self.ordered)
- except ValueError:
- return np.take(new_categories, self._codes)
-
- __eq__ = _cat_compare_op('__eq__')
- __ne__ = _cat_compare_op('__ne__')
- __lt__ = _cat_compare_op('__lt__')
- __gt__ = _cat_compare_op('__gt__')
- __le__ = _cat_compare_op('__le__')
- __ge__ = _cat_compare_op('__ge__')
-
- # for Series/ndarray like compat
- @property
- def shape(self):
- """ Shape of the Categorical.
-
- For internal compatibility with numpy arrays.
-
- Returns
- -------
- shape : tuple
- """
-
- return tuple([len(self._codes)])
-
- def shift(self, periods):
- """
- Shift Categorical by desired number of periods.
-
- Parameters
- ----------
- periods : int
- Number of periods to move, can be positive or negative
-
- Returns
- -------
- shifted : Categorical
- """
- # since categoricals always have ndim == 1, an axis parameter
- # doesn't make any sense here.
- codes = self.codes
- if codes.ndim > 1:
- raise NotImplementedError("Categorical with ndim > 1.")
- if np.prod(codes.shape) and (periods != 0):
- codes = np.roll(codes, _ensure_platform_int(periods), axis=0)
- if periods > 0:
- codes[:periods] = -1
- else:
- codes[periods:] = -1
-
- return self.from_codes(codes, categories=self.categories,
- ordered=self.ordered)
-
- def __array__(self, dtype=None):
- """
- The numpy array interface.
-
- Returns
- -------
- values : numpy array
- A numpy array of either the specified dtype or,
- if dtype==None (default), the same dtype as
- categorical.categories.dtype
- """
- ret = take_1d(self.categories.values, self._codes)
- if dtype and not is_dtype_equal(dtype, self.categories.dtype):
- return np.asarray(ret, dtype)
- return ret
-
- def __setstate__(self, state):
- """Necessary for making this object picklable"""
- if not isinstance(state, dict):
- raise Exception('invalid pickle state')
-
- # Provide compatibility with pre-0.15.0 Categoricals.
- if '_categories' not in state and '_levels' in state:
- state['_categories'] = self.dtype._validate_categories(state.pop(
- '_levels'))
- if '_codes' not in state and 'labels' in state:
- state['_codes'] = coerce_indexer_dtype(
- state.pop('labels'), state['_categories'])
-
- # 0.16.0 ordered change
- if '_ordered' not in state:
-
- # >=15.0 < 0.16.0
- if 'ordered' in state:
- state['_ordered'] = state.pop('ordered')
- else:
- state['_ordered'] = False
-
- # 0.21.0 CategoricalDtype change
- if '_dtype' not in state:
- state['_dtype'] = CategoricalDtype(state['_categories'],
- state['_ordered'])
-
- for k, v in compat.iteritems(state):
- setattr(self, k, v)
-
- @property
- def T(self):
- return self
-
- @property
- def nbytes(self):
- return self._codes.nbytes + self.dtype.categories.values.nbytes
-
- def memory_usage(self, deep=False):
- """
- Memory usage of my values
-
- Parameters
- ----------
- deep : bool
- Introspect the data deeply, interrogate
- `object` dtypes for system-level memory consumption
-
- Returns
- -------
- bytes used
-
- Notes
- -----
- Memory usage does not include memory consumed by elements that
- are not components of the array if deep=False
-
- See Also
- --------
- numpy.ndarray.nbytes
- """
- return self._codes.nbytes + self.dtype.categories.memory_usage(
- deep=deep)
-
- @Substitution(klass='Categorical')
- @Appender(_shared_docs['searchsorted'])
- @deprecate_kwarg(old_arg_name='v', new_arg_name='value')
- def searchsorted(self, value, side='left', sorter=None):
- if not self.ordered:
- raise ValueError("Categorical not ordered\nyou can use "
- ".as_ordered() to change the Categorical to an "
- "ordered one")
-
- from pandas.core.series import Series
-
- values_as_codes = _get_codes_for_values(Series(value).values,
- self.categories)
-
- if -1 in values_as_codes:
- raise ValueError("Value(s) to be inserted must be in categories.")
-
- return self.codes.searchsorted(values_as_codes, side=side,
- sorter=sorter)
-
- def isna(self):
- """
- Detect missing values
-
- Both missing values (-1 in .codes) and NA as a category are detected.
-
- Returns
- -------
- a boolean array of whether my values are null
-
- See also
- --------
- isna : top-level isna
- isnull : alias of isna
- Categorical.notna : boolean inverse of Categorical.isna
-
- """
-
- ret = self._codes == -1
-
- # String/object and float categories can hold np.nan
- if self.categories.dtype.kind in ['S', 'O', 'f']:
- if np.nan in self.categories:
- nan_pos = np.where(isna(self.categories))[0]
- # we only have one NA in categories
- ret = np.logical_or(ret, self._codes == nan_pos)
- return ret
- isnull = isna
-
- def notna(self):
- """
- Inverse of isna
-
- Both missing values (-1 in .codes) and NA as a category are detected as
- null.
-
- Returns
- -------
- a boolean array of whether my values are not null
-
- See also
- --------
- notna : top-level notna
- notnull : alias of notna
- Categorical.isna : boolean inverse of Categorical.notna
-
- """
- return ~self.isna()
- notnull = notna
-
- def put(self, *args, **kwargs):
- """
- Replace specific elements in the Categorical with given values.
- """
- raise NotImplementedError(("'put' is not yet implemented "
- "for Categorical"))
-
- def dropna(self):
- """
- Return the Categorical without null values.
-
- Both missing values (-1 in .codes) and NA as a category are detected.
- NA is removed from the categories if present.
-
- Returns
- -------
- valid : Categorical
- """
- result = self[self.notna()]
- if isna(result.categories).any():
- result = result.remove_categories([np.nan])
- return result
-
- def value_counts(self, dropna=True):
- """
- Returns a Series containing counts of each category.
-
- Every category will have an entry, even those with a count of 0.
-
- Parameters
- ----------
- dropna : boolean, default True
- Don't include counts of NaN, even if NaN is a category.
-
- Returns
- -------
- counts : Series
-
- See Also
- --------
- Series.value_counts
-
- """
- from numpy import bincount
- from pandas import isna, Series, CategoricalIndex
-
- obj = (self.remove_categories([np.nan]) if dropna and
- isna(self.categories).any() else self)
- code, cat = obj._codes, obj.categories
- ncat, mask = len(cat), 0 <= code
- ix, clean = np.arange(ncat), mask.all()
-
- if dropna or clean:
- obs = code if clean else code[mask]
- count = bincount(obs, minlength=ncat or None)
- else:
- count = bincount(np.where(mask, code, ncat))
- ix = np.append(ix, -1)
-
- ix = self._constructor(ix, dtype=self.dtype,
- fastpath=True)
-
- return Series(count, index=CategoricalIndex(ix), dtype='int64')
-
- def get_values(self):
- """ Return the values.
-
- For internal compatibility with pandas formatting.
-
- Returns
- -------
- values : numpy array
- A numpy array of the same dtype as categorical.categories.dtype or
- Index if datetime / periods
- """
- # if we are a datetime and period index, return Index to keep metadata
- if is_datetimelike(self.categories):
- return self.categories.take(self._codes, fill_value=np.nan)
- return np.array(self)
-
- def check_for_ordered(self, op):
- """ assert that we are ordered """
- if not self.ordered:
- raise TypeError("Categorical is not ordered for operation {op}\n"
- "you can use .as_ordered() to change the "
- "Categorical to an ordered one\n".format(op=op))
-
- def argsort(self, ascending=True, kind='quicksort', *args, **kwargs):
- """
- Returns the indices that would sort the Categorical instance if
- 'sort_values' was called. This function is implemented to provide
- compatibility with numpy ndarray objects.
-
- While an ordering is applied to the category values, arg-sorting
- in this context refers more to organizing and grouping together
- based on matching category values. Thus, this function can be
- called on an unordered Categorical instance unlike the functions
- 'Categorical.min' and 'Categorical.max'.
-
- Returns
- -------
- argsorted : numpy array
-
- See also
- --------
- numpy.ndarray.argsort
- """
- ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
- result = np.argsort(self._codes.copy(), kind=kind, **kwargs)
- if not ascending:
- result = result[::-1]
- return result
-
- def sort_values(self, inplace=False, ascending=True, na_position='last'):
- """ Sorts the Categorical by category value returning a new
- Categorical by default.
-
- While an ordering is applied to the category values, sorting in this
- context refers more to organizing and grouping together based on
- matching category values. Thus, this function can be called on an
- unordered Categorical instance unlike the functions 'Categorical.min'
- and 'Categorical.max'.
-
- Parameters
- ----------
- inplace : boolean, default False
- Do operation in place.
- ascending : boolean, default True
- Order ascending. Passing False orders descending. The
- ordering parameter provides the method by which the
- category values are organized.
- na_position : {'first', 'last'} (optional, default='last')
- 'first' puts NaNs at the beginning
- 'last' puts NaNs at the end
-
- Returns
- -------
- y : Categorical or None
-
- See Also
- --------
- Categorical.sort
- Series.sort_values
-
- Examples
- --------
- >>> c = pd.Categorical([1, 2, 2, 1, 5])
- >>> c
- [1, 2, 2, 1, 5]
- Categories (3, int64): [1, 2, 5]
- >>> c.sort_values()
- [1, 1, 2, 2, 5]
- Categories (3, int64): [1, 2, 5]
- >>> c.sort_values(ascending=False)
- [5, 2, 2, 1, 1]
- Categories (3, int64): [1, 2, 5]
-
- Inplace sorting can be done as well:
-
- >>> c.sort_values(inplace=True)
- >>> c
- [1, 1, 2, 2, 5]
- Categories (3, int64): [1, 2, 5]
- >>>
- >>> c = pd.Categorical([1, 2, 2, 1, 5])
-
- 'sort_values' behaviour with NaNs. Note that 'na_position'
- is independent of the 'ascending' parameter:
-
- >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
- >>> c
- [NaN, 2.0, 2.0, NaN, 5.0]
- Categories (2, int64): [2, 5]
- >>> c.sort_values()
- [2.0, 2.0, 5.0, NaN, NaN]
- Categories (2, int64): [2, 5]
- >>> c.sort_values(ascending=False)
- [5.0, 2.0, 2.0, NaN, NaN]
- Categories (2, int64): [2, 5]
- >>> c.sort_values(na_position='first')
- [NaN, NaN, 2.0, 2.0, 5.0]
- Categories (2, int64): [2, 5]
- >>> c.sort_values(ascending=False, na_position='first')
- [NaN, NaN, 5.0, 2.0, 2.0]
- Categories (2, int64): [2, 5]
- """
- inplace = validate_bool_kwarg(inplace, 'inplace')
- if na_position not in ['last', 'first']:
- msg = 'invalid na_position: {na_position!r}'
- raise ValueError(msg.format(na_position=na_position))
-
- codes = np.sort(self._codes)
- if not ascending:
- codes = codes[::-1]
-
- # NaN handling
- na_mask = (codes == -1)
- if na_mask.any():
- n_nans = len(codes[na_mask])
- if na_position == "first":
- # in this case sort to the front
- new_codes = codes.copy()
- new_codes[0:n_nans] = -1
- new_codes[n_nans:] = codes[~na_mask]
- codes = new_codes
- elif na_position == "last":
- # ... and to the end
- new_codes = codes.copy()
- pos = len(codes) - n_nans
- new_codes[0:pos] = codes[~na_mask]
- new_codes[pos:] = -1
- codes = new_codes
- if inplace:
- self._codes = codes
- return
- else:
- return self._constructor(values=codes, categories=self.categories,
- ordered=self.ordered, fastpath=True)
-
- def _values_for_rank(self):
- """
- For correctly ranking ordered categorical data. See GH#15420
-
- Ordered categorical data should be ranked on the basis of
- codes with -1 translated to NaN.
-
- Returns
- -------
- numpy array
-
- """
- from pandas import Series
- if self.ordered:
- values = self.codes
- mask = values == -1
- if mask.any():
- values = values.astype('float64')
- values[mask] = np.nan
- elif self.categories.is_numeric():
- values = np.array(self)
- else:
- # reorder the categories (so rank can use the float codes)
- # instead of passing an object array to rank
- values = np.array(
- self.rename_categories(Series(self.categories).rank().values)
- )
- return values
-
- def ravel(self, order='C'):
- """ Return a flattened (numpy) array.
-
- For internal compatibility with numpy arrays.
-
- Returns
- -------
- raveled : numpy array
- """
- return np.array(self)
-
- def view(self):
- """Return a view of myself.
-
- For internal compatibility with numpy arrays.
-
- Returns
- -------
- view : Categorical
- Returns `self`!
- """
- return self
-
- def to_dense(self):
- """Return my 'dense' representation
-
- For internal compatibility with numpy arrays.
-
- Returns
- -------
- dense : array
- """
- return np.asarray(self)
-
- @deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
- def fillna(self, value=None, method=None, limit=None):
- """ Fill NA/NaN values using the specified method.
-
- Parameters
- ----------
- method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
- Method to use for filling holes in reindexed Series
- pad / ffill: propagate last valid observation forward to next valid
- backfill / bfill: use NEXT valid observation to fill gap
- value : scalar, dict, Series
- If a scalar value is passed it is used to fill all missing values.
- Alternatively, a Series or dict can be used to fill in different
- values for each index. The value should not be a list. The
- value(s) passed should either be in the categories or should be
- NaN.
- limit : int, default None
- (Not implemented yet for Categorical!)
- If method is specified, this is the maximum number of consecutive
- NaN values to forward/backward fill. In other words, if there is
- a gap with more than this number of consecutive NaNs, it will only
- be partially filled. If method is not specified, this is the
- maximum number of entries along the entire axis where NaNs will be
- filled.
-
- Returns
- -------
- filled : Categorical with NA/NaN filled
- """
-
- if value is None:
- value = np.nan
- if limit is not None:
- raise NotImplementedError("specifying a limit for fillna has not "
- "been implemented yet")
-
- values = self._codes
-
- # Make sure that we also get NA in categories
- if self.categories.dtype.kind in ['S', 'O', 'f']:
- if np.nan in self.categories:
- values = values.copy()
- nan_pos = np.where(isna(self.categories))[0]
- # we only have one NA in categories
- values[values == nan_pos] = -1
-
- # pad / bfill
- if method is not None:
-
- values = self.to_dense().reshape(-1, len(self))
- values = interpolate_2d(values, method, 0, None,
- value).astype(self.categories.dtype)[0]
- values = _get_codes_for_values(values, self.categories)
-
- else:
-
- # If value is a dict or a Series (a dict value has already
- # been converted to a Series)
- if isinstance(value, ABCSeries):
- if not value[~value.isin(self.categories)].isna().all():
- raise ValueError("fill value must be in categories")
-
- values_codes = _get_codes_for_values(value, self.categories)
- indexer = np.where(values_codes != -1)
- values[indexer] = values_codes[values_codes != -1]
-
- # If value is not a dict or Series it should be a scalar
- elif is_scalar(value):
- if not isna(value) and value not in self.categories:
- raise ValueError("fill value must be in categories")
-
- mask = values == -1
- if mask.any():
- values = values.copy()
- if isna(value):
- values[mask] = -1
- else:
- values[mask] = self.categories.get_loc(value)
-
- else:
- raise TypeError('"value" parameter must be a scalar, dict '
- 'or Series, but you passed a '
- '"{0}"'.format(type(value).__name__))
-
- return self._constructor(values, categories=self.categories,
- ordered=self.ordered, fastpath=True)
-
- def take_nd(self, indexer, allow_fill=True, fill_value=None):
- """ Take the codes by the indexer, fill with the fill_value.
-
- For internal compatibility with numpy arrays.
- """
-
- # filling must always be None/nan here
- # but is passed thru internally
- assert isna(fill_value)
-
- codes = take_1d(self._codes, indexer, allow_fill=True, fill_value=-1)
- result = self._constructor(codes, categories=self.categories,
- ordered=self.ordered, fastpath=True)
- return result
-
- take = take_nd
-
- def _slice(self, slicer):
- """ Return a slice of myself.
-
- For internal compatibility with numpy arrays.
- """
-
- # only allow 1 dimensional slicing, but can
- # in a 2-d case be passd (slice(None),....)
- if isinstance(slicer, tuple) and len(slicer) == 2:
- if not is_null_slice(slicer[0]):
- raise AssertionError("invalid slicing for a 1-ndim "
- "categorical")
- slicer = slicer[1]
-
- _codes = self._codes[slicer]
- return self._constructor(values=_codes, categories=self.categories,
- ordered=self.ordered, fastpath=True)
-
- def __len__(self):
- """The length of this Categorical."""
- return len(self._codes)
-
- def __iter__(self):
- """Returns an Iterator over the values of this Categorical."""
- return iter(self.get_values())
-
- def _tidy_repr(self, max_vals=10, footer=True):
- """ a short repr displaying only max_vals and an optional (but default
- footer)
- """
- num = max_vals // 2
- head = self[:num]._get_repr(length=False, footer=False)
- tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
-
- result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
- if footer:
- result = u('{result}\n{footer}').format(result=result,
- footer=self._repr_footer())
-
- return compat.text_type(result)
-
- def _repr_categories(self):
- """ return the base repr for the categories """
- max_categories = (10 if get_option("display.max_categories") == 0 else
- get_option("display.max_categories"))
- from pandas.io.formats import format as fmt
- if len(self.categories) > max_categories:
- num = max_categories // 2
- head = fmt.format_array(self.categories[:num], None)
- tail = fmt.format_array(self.categories[-num:], None)
- category_strs = head + ["..."] + tail
- else:
- category_strs = fmt.format_array(self.categories, None)
-
- # Strip all leading spaces, which format_array adds for columns...
- category_strs = [x.strip() for x in category_strs]
- return category_strs
-
- def _repr_categories_info(self):
- """ Returns a string representation of the footer."""
-
- category_strs = self._repr_categories()
- dtype = getattr(self.categories, 'dtype_str',
- str(self.categories.dtype))
-
- levheader = "Categories ({length}, {dtype}): ".format(
- length=len(self.categories), dtype=dtype)
- width, height = get_terminal_size()
- max_width = get_option("display.width") or width
- if com.in_ipython_frontend():
- # 0 = no breaks
- max_width = 0
- levstring = ""
- start = True
- cur_col_len = len(levheader) # header
- sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
- linesep = sep.rstrip() + "\n" # remove whitespace
- for val in category_strs:
- if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
- levstring += linesep + (" " * (len(levheader) + 1))
- cur_col_len = len(levheader) + 1 # header + a whitespace
- elif not start:
- levstring += sep
- cur_col_len += len(val)
- levstring += val
- start = False
- # replace to simple save space by
- return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
-
- def _repr_footer(self):
-
- return u('Length: {length}\n{info}').format(
- length=len(self), info=self._repr_categories_info())
-
- def _get_repr(self, length=True, na_rep='NaN', footer=True):
- from pandas.io.formats import format as fmt
- formatter = fmt.CategoricalFormatter(self, length=length,
- na_rep=na_rep, footer=footer)
- result = formatter.to_string()
- return compat.text_type(result)
-
- def __unicode__(self):
- """ Unicode representation. """
- _maxlen = 10
- if len(self._codes) > _maxlen:
- result = self._tidy_repr(_maxlen)
- elif len(self._codes) > 0:
- result = self._get_repr(length=len(self) > _maxlen)
- else:
- msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
- result = ('[], {repr_msg}'.format(repr_msg=msg))
-
- return result
-
- def _maybe_coerce_indexer(self, indexer):
- """ return an indexer coerced to the codes dtype """
- if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
- indexer = indexer.astype(self._codes.dtype)
- return indexer
-
- def __getitem__(self, key):
- """ Return an item. """
- if isinstance(key, (int, np.integer)):
- i = self._codes[key]
- if i == -1:
- return np.nan
- else:
- return self.categories[i]
- else:
- return self._constructor(values=self._codes[key],
- categories=self.categories,
- ordered=self.ordered, fastpath=True)
-
- def __setitem__(self, key, value):
- """ Item assignment.
-
-
- Raises
- ------
- ValueError
- If (one or more) Value is not in categories or if a assigned
- `Categorical` does not have the same categories
- """
-
- # require identical categories set
- if isinstance(value, Categorical):
- if not value.categories.equals(self.categories):
- raise ValueError("Cannot set a Categorical with another, "
- "without identical categories")
-
- rvalue = value if is_list_like(value) else [value]
-
- from pandas import Index
- to_add = Index(rvalue).difference(self.categories)
-
- # no assignments of values not in categories, but it's always ok to set
- # something to np.nan
- if len(to_add) and not isna(to_add).all():
- raise ValueError("Cannot setitem on a Categorical with a new "
- "category, set the categories first")
-
- # set by position
- if isinstance(key, (int, np.integer)):
- pass
-
- # tuple of indexers (dataframe)
- elif isinstance(key, tuple):
- # only allow 1 dimensional slicing, but can
- # in a 2-d case be passd (slice(None),....)
- if len(key) == 2:
- if not is_null_slice(key[0]):
- raise AssertionError("invalid slicing for a 1-ndim "
- "categorical")
- key = key[1]
- elif len(key) == 1:
- key = key[0]
- else:
- raise AssertionError("invalid slicing for a 1-ndim "
- "categorical")
-
- # slicing in Series or Categorical
- elif isinstance(key, slice):
- pass
-
- # Array of True/False in Series or Categorical
- else:
- # There is a bug in numpy, which does not accept a Series as a
- # indexer
- # https://github.com/pandas-dev/pandas/issues/6168
- # https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
- # FIXME: remove when numpy 1.9 is the lowest numpy version pandas
- # accepts...
- key = np.asarray(key)
-
- lindexer = self.categories.get_indexer(rvalue)
-
- # FIXME: the following can be removed after GH7820 is fixed:
- # https://github.com/pandas-dev/pandas/issues/7820
- # float categories do currently return -1 for np.nan, even if np.nan is
- # included in the index -> "repair" this here
- if isna(rvalue).any() and isna(self.categories).any():
- nan_pos = np.where(isna(self.categories))[0]
- lindexer[lindexer == -1] = nan_pos
-
- lindexer = self._maybe_coerce_indexer(lindexer)
- self._codes[key] = lindexer
-
- def _reverse_indexer(self):
- """
- Compute the inverse of a categorical, returning
- a dict of categories -> indexers.
-
- *This is an internal function*
-
- Returns
- -------
- dict of categories -> indexers
-
- Example
- -------
- In [1]: c = pd.Categorical(list('aabca'))
-
- In [2]: c
- Out[2]:
- [a, a, b, c, a]
- Categories (3, object): [a, b, c]
-
- In [3]: c.categories
- Out[3]: Index([u'a', u'b', u'c'], dtype='object')
-
- In [4]: c.codes
- Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
-
- In [5]: c._reverse_indexer()
- Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
-
- """
- categories = self.categories
- r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
- categories.size)
- counts = counts.cumsum()
- result = [r[counts[indexer]:counts[indexer + 1]]
- for indexer in range(len(counts) - 1)]
- result = dict(zip(categories, result))
- return result
-
- # reduction ops #
- def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
- filter_type=None, **kwds):
- """ perform the reduction type operation """
- func = getattr(self, name, None)
- if func is None:
- msg = 'Categorical cannot perform the operation {op}'
- raise TypeError(msg.format(op=name))
- return func(numeric_only=numeric_only, **kwds)
-
- def min(self, numeric_only=None, **kwargs):
- """ The minimum value of the object.
-
- Only ordered `Categoricals` have a minimum!
-
- Raises
- ------
- TypeError
- If the `Categorical` is not `ordered`.
-
- Returns
- -------
- min : the minimum of this `Categorical`
- """
- self.check_for_ordered('min')
- if numeric_only:
- good = self._codes != -1
- pointer = self._codes[good].min(**kwargs)
- else:
- pointer = self._codes.min(**kwargs)
- if pointer == -1:
- return np.nan
- else:
- return self.categories[pointer]
-
- def max(self, numeric_only=None, **kwargs):
- """ The maximum value of the object.
-
- Only ordered `Categoricals` have a maximum!
-
- Raises
- ------
- TypeError
- If the `Categorical` is not `ordered`.
-
- Returns
- -------
- max : the maximum of this `Categorical`
- """
- self.check_for_ordered('max')
- if numeric_only:
- good = self._codes != -1
- pointer = self._codes[good].max(**kwargs)
- else:
- pointer = self._codes.max(**kwargs)
- if pointer == -1:
- return np.nan
- else:
- return self.categories[pointer]
-
- def mode(self):
- """
- Returns the mode(s) of the Categorical.
-
- Always returns `Categorical` even if only one value.
-
- Returns
- -------
- modes : `Categorical` (sorted)
- """
-
- import pandas._libs.hashtable as htable
- good = self._codes != -1
- values = sorted(htable.mode_int64(_ensure_int64(self._codes[good])))
- result = self._constructor(values=values, categories=self.categories,
- ordered=self.ordered, fastpath=True)
- return result
-
- def unique(self):
- """
- Return the ``Categorical`` which ``categories`` and ``codes`` are
- unique. Unused categories are NOT returned.
-
- - unordered category: values and categories are sorted by appearance
- order.
- - ordered category: values are sorted by appearance order, categories
- keeps existing order.
-
- Returns
- -------
- unique values : ``Categorical``
-
- Examples
- --------
- An unordered Categorical will return categories in the
- order of appearance.
-
- >>> pd.Categorical(list('baabc'))
- [b, a, c]
- Categories (3, object): [b, a, c]
-
- >>> pd.Categorical(list('baabc'), categories=list('abc'))
- [b, a, c]
- Categories (3, object): [b, a, c]
-
- An ordered Categorical preserves the category ordering.
-
- >>> pd.Categorical(list('baabc'),
- ... categories=list('abc'),
- ... ordered=True)
- [b, a, c]
- Categories (3, object): [a < b < c]
-
- See Also
- --------
- unique
- CategoricalIndex.unique
- Series.unique
-
- """
-
- # unlike np.unique, unique1d does not sort
- unique_codes = unique1d(self.codes)
- cat = self.copy()
-
- # keep nan in codes
- cat._codes = unique_codes
-
- # exclude nan from indexer for categories
- take_codes = unique_codes[unique_codes != -1]
- if self.ordered:
- take_codes = sorted(take_codes)
- return cat.set_categories(cat.categories.take(take_codes))
-
- def equals(self, other):
- """
- Returns True if categorical arrays are equal.
-
- Parameters
- ----------
- other : `Categorical`
-
- Returns
- -------
- are_equal : boolean
- """
- if self.is_dtype_equal(other):
- if self.categories.equals(other.categories):
- # fastpath to avoid re-coding
- other_codes = other._codes
- else:
- other_codes = _recode_for_categories(other.codes,
- other.categories,
- self.categories)
- return np.array_equal(self._codes, other_codes)
- return False
-
- def is_dtype_equal(self, other):
- """
- Returns True if categoricals are the same dtype
- same categories, and same ordered
-
- Parameters
- ----------
- other : Categorical
-
- Returns
- -------
- are_equal : boolean
- """
-
- try:
- return hash(self.dtype) == hash(other.dtype)
- except (AttributeError, TypeError):
- return False
-
- def describe(self):
- """ Describes this Categorical
-
- Returns
- -------
- description: `DataFrame`
- A dataframe with frequency and counts by category.
- """
- counts = self.value_counts(dropna=False)
- freqs = counts / float(counts.sum())
-
- from pandas.core.reshape.concat import concat
- result = concat([counts, freqs], axis=1)
- result.columns = ['counts', 'freqs']
- result.index.name = 'categories'
-
- return result
-
- def repeat(self, repeats, *args, **kwargs):
- """
- Repeat elements of a Categorical.
-
- See also
- --------
- numpy.ndarray.repeat
-
- """
- nv.validate_repeat(args, kwargs)
- codes = self._codes.repeat(repeats)
- return self._constructor(values=codes, categories=self.categories,
- ordered=self.ordered, fastpath=True)
-
-# The Series.cat accessor
-
-
-class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
- """
- Accessor object for categorical properties of the Series values.
-
- Be aware that assigning to `categories` is a inplace operation, while all
- methods return new categorical data per default (but can be called with
- `inplace=True`).
-
- Parameters
- ----------
- data : Series or CategoricalIndex
-
- Examples
- --------
- >>> s.cat.categories
- >>> s.cat.categories = list('abc')
- >>> s.cat.rename_categories(list('cab'))
- >>> s.cat.reorder_categories(list('cab'))
- >>> s.cat.add_categories(['d','e'])
- >>> s.cat.remove_categories(['d'])
- >>> s.cat.remove_unused_categories()
- >>> s.cat.set_categories(list('abcde'))
- >>> s.cat.as_ordered()
- >>> s.cat.as_unordered()
-
- """
-
- def __init__(self, data):
- self._validate(data)
- self.categorical = data.values
- self.index = data.index
- self.name = data.name
- self._freeze()
-
- @staticmethod
- def _validate(data):
- if not is_categorical_dtype(data.dtype):
- raise AttributeError("Can only use .cat accessor with a "
- "'category' dtype")
-
- def _delegate_property_get(self, name):
- return getattr(self.categorical, name)
-
- def _delegate_property_set(self, name, new_values):
- return setattr(self.categorical, name, new_values)
-
- @property
- def codes(self):
- from pandas import Series
- return Series(self.categorical.codes, index=self.index)
-
- def _delegate_method(self, name, *args, **kwargs):
- from pandas import Series
- method = getattr(self.categorical, name)
- res = method(*args, **kwargs)
- if res is not None:
- return Series(res, index=self.index, name=self.name)
-
-
-CategoricalAccessor._add_delegate_accessors(delegate=Categorical,
- accessors=["categories",
- "ordered"],
- typ='property')
-CategoricalAccessor._add_delegate_accessors(delegate=Categorical, accessors=[
- "rename_categories", "reorder_categories", "add_categories",
- "remove_categories", "remove_unused_categories", "set_categories",
- "as_ordered", "as_unordered"], typ='method')
-
-# utility routines
-
-
-def _get_codes_for_values(values, categories):
- """
- utility routine to turn values into codes given the specified categories
- """
-
- from pandas.core.algorithms import _get_data_algo, _hashtables
- if not is_dtype_equal(values.dtype, categories.dtype):
- values = _ensure_object(values)
- categories = _ensure_object(categories)
-
- (hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
- (_, _), cats = _get_data_algo(categories, _hashtables)
- t = hash_klass(len(cats))
- t.map_locations(cats)
- return coerce_indexer_dtype(t.lookup(vals), cats)
-
-
-def _recode_for_categories(codes, old_categories, new_categories):
- """
- Convert a set of codes for to a new set of categories
-
- Parameters
- ----------
- codes : array
- old_categories, new_categories : Index
-
- Returns
- -------
- new_codes : array
-
- Examples
- --------
- >>> old_cat = pd.Index(['b', 'a', 'c'])
- >>> new_cat = pd.Index(['a', 'b'])
- >>> codes = np.array([0, 1, 1, 2])
- >>> _recode_for_categories(codes, old_cat, new_cat)
- array([ 1, 0, 0, -1])
- """
- from pandas.core.algorithms import take_1d
-
- if len(old_categories) == 0:
- # All null anyway, so just retain the nulls
- return codes.copy()
- indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
- new_categories)
- new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
- return new_codes
-
-
-def _convert_to_list_like(list_like):
- if hasattr(list_like, "dtype"):
- return list_like
- if isinstance(list_like, list):
- return list_like
- if (is_sequence(list_like) or isinstance(list_like, tuple) or
- isinstance(list_like, types.GeneratorType)):
- return list(list_like)
- elif is_scalar(list_like):
- return [list_like]
- else:
- # is this reached?
- return [list_like]
-
-
-def _factorize_from_iterable(values):
- """
- Factorize an input `values` into `categories` and `codes`. Preserves
- categorical dtype in `categories`.
-
- *This is an internal function*
-
- Parameters
- ----------
- values : list-like
-
- Returns
- -------
- codes : ndarray
- categories : Index
- If `values` has a categorical dtype, then `categories` is
- a CategoricalIndex keeping the categories and order of `values`.
- """
- from pandas.core.indexes.category import CategoricalIndex
-
- if not is_list_like(values):
- raise TypeError("Input must be list-like")
-
- if is_categorical(values):
- if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
- values = values._values
- categories = CategoricalIndex(values.categories,
- categories=values.categories,
- ordered=values.ordered)
- codes = values.codes
- else:
- cat = Categorical(values, ordered=True)
- categories = cat.categories
- codes = cat.codes
- return codes, categories
-
-
-def _factorize_from_iterables(iterables):
- """
- A higher-level wrapper over `_factorize_from_iterable`.
-
- *This is an internal function*
-
- Parameters
- ----------
- iterables : list-like of list-likes
-
- Returns
- -------
- codes_list : list of ndarrays
- categories_list : list of Indexes
-
- Notes
- -----
- See `_factorize_from_iterable` for more info.
- """
- if len(iterables) == 0:
- # For consistency, it should return a list of 2 lists.
- return [[], []]
- return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
+from pandas.core.arrays import Categorical # noqa
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index 5e6193d673756..3e54ce61cd5b2 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -314,7 +314,7 @@ def union_categoricals(to_union, sort_categories=False, ignore_order=False):
Categories (3, object): [b, c, a]
"""
from pandas import Index, Categorical, CategoricalIndex, Series
- from pandas.core.categorical import _recode_for_categories
+ from pandas.core.arrays.categorical import _recode_for_categories
if len(to_union) == 0:
raise ValueError('No Categoricals to union')
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 2c05eefa5706e..7771060ad82c7 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -77,7 +77,7 @@
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.series import Series
-from pandas.core.categorical import Categorical
+from pandas.core.arrays import Categorical
import pandas.core.algorithms as algorithms
from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u,
OrderedDict, raise_with_traceback)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 082b6e2a8b1a0..25e44589488ee 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -47,7 +47,7 @@
DataError, SpecificationError)
from pandas.core.index import (Index, MultiIndex,
CategoricalIndex, _ensure_index)
-from pandas.core.categorical import Categorical
+from pandas.core.arrays import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.internals import BlockManager, make_block
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index ac7cb30fa823d..9a6210db1aacb 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -125,7 +125,7 @@ def _create_from_codes(self, codes, categories=None, ordered=None,
CategoricalIndex
"""
- from pandas.core.categorical import Categorical
+ from pandas.core.arrays import Categorical
if categories is None:
categories = self.categories
if ordered is None:
@@ -162,7 +162,7 @@ def _create_categorical(self, data, categories=None, ordered=None,
if not isinstance(data, ABCCategorical):
if ordered is None and dtype is None:
ordered = False
- from pandas.core.categorical import Categorical
+ from pandas.core.arrays import Categorical
data = Categorical(data, categories=categories, ordered=ordered,
dtype=dtype)
else:
@@ -462,7 +462,7 @@ def where(self, cond, other=None):
other = self._na_value
values = np.where(cond, self.values, other)
- from pandas.core.categorical import Categorical
+ from pandas.core.arrays import Categorical
cat = Categorical(values,
categories=self.categories,
ordered=self.ordered)
@@ -775,7 +775,7 @@ def _delegate_method(self, name, *args, **kwargs):
def _add_accessors(cls):
""" add in Categorical accessor methods """
- from pandas.core.categorical import Categorical
+ from pandas.core.arrays import Categorical
CategoricalIndex._add_delegate_accessors(
delegate=Categorical, accessors=["rename_categories",
"reorder_categories",
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 5739c8dfd8b53..608553b9c3bf2 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1182,7 +1182,7 @@ def from_arrays(cls, arrays, sortorder=None, names=None):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError('all arrays must be same length')
- from pandas.core.categorical import _factorize_from_iterables
+ from pandas.core.arrays.categorical import _factorize_from_iterables
labels, levels = _factorize_from_iterables(arrays)
if names is None:
@@ -1276,7 +1276,7 @@ def from_product(cls, iterables, sortorder=None, names=None):
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_tuples : Convert list of tuples to MultiIndex
"""
- from pandas.core.categorical import _factorize_from_iterables
+ from pandas.core.arrays.categorical import _factorize_from_iterables
from pandas.core.reshape.util import cartesian_product
if not is_list_like(iterables):
@@ -1749,7 +1749,7 @@ def _get_labels_for_sorting(self):
for sorting, where we need to disambiguate that -1 is not
a valid valid
"""
- from pandas.core.categorical import Categorical
+ from pandas.core.arrays import Categorical
def cats(label):
return np.arange(np.array(label).max() + 1 if len(label) else 0,
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 3c923133477df..45618282ab4f7 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -59,7 +59,7 @@
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
-from pandas.core.categorical import Categorical, _maybe_to_categorical
+from pandas.core.arrays.categorical import Categorical, _maybe_to_categorical
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.io.formats.printing import pprint_thing
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index aaadf6d3ca32f..20f4384a3d698 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -7,8 +7,8 @@
from pandas.core.index import (_get_objs_combined_axis,
_ensure_index, _get_consensus_names,
_all_indexes_same)
-from pandas.core.categorical import (_factorize_from_iterable,
- _factorize_from_iterables)
+from pandas.core.arrays.categorical import (_factorize_from_iterable,
+ _factorize_from_iterables)
from pandas.core.internals import concatenate_block_managers
from pandas.core import common as com
from pandas.core.generic import NDFrame
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index 28e9694681912..01445eb30a9e5 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -4,7 +4,7 @@
from pandas.core.dtypes.common import is_list_like
from pandas import compat
-from pandas.core.categorical import Categorical
+from pandas.core.arrays import Categorical
from pandas.core.dtypes.generic import ABCMultiIndex
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index f7a0fab9998d0..c8bca476c65f2 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -21,7 +21,8 @@
from pandas.core.sparse.array import SparseArray
from pandas._libs.sparse import IntIndex
-from pandas.core.categorical import Categorical, _factorize_from_iterable
+from pandas.core.arrays import Categorical
+from pandas.core.arrays.categorical import _factorize_from_iterable
from pandas.core.sorting import (get_group_index, get_compressed_ids,
compress_group_index, decons_obs_group_ids)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 73a7fe1fd89e9..be40f65186d2d 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -53,7 +53,7 @@
from pandas.core.indexing import check_bool_indexer, maybe_convert_indices
from pandas.core import generic, base
from pandas.core.internals import SingleBlockManager
-from pandas.core.categorical import Categorical, CategoricalAccessor
+from pandas.core.arrays.categorical import Categorical, CategoricalAccessor
from pandas.core.indexes.accessors import CombinedDatetimelikeProperties
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index 27252b9616a44..e550976d1deeb 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -182,7 +182,7 @@ def indexer_from_factorized(labels, shape, compress=True):
def lexsort_indexer(keys, orders=None, na_position='last'):
- from pandas.core.categorical import Categorical
+ from pandas.core.arrays import Categorical
labels = []
shape = []
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 150fccde81a60..1a2f62442a063 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -28,7 +28,7 @@
_ensure_index_from_sequences)
from pandas.core.series import Series
from pandas.core.frame import DataFrame
-from pandas.core.categorical import Categorical
+from pandas.core.arrays import Categorical
from pandas.core import algorithms
from pandas.core.common import AbstractMethodError
from pandas.io.date_converters import generic_parser
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 72543bb6f825e..c8490167022e5 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -36,7 +36,8 @@
from pandas.errors import PerformanceWarning
from pandas.core.common import _asarray_tuplesafe, _all_none
from pandas.core.algorithms import match, unique
-from pandas.core.categorical import Categorical, _factorize_from_iterables
+from pandas.core.arrays.categorical import (Categorical,
+ _factorize_from_iterables)
from pandas.core.internals import (BlockManager, make_block,
_block2d_to_blocknd,
_factor_indexer, _block_shape)
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 2b97b447921bb..b409cf20e9a09 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -24,7 +24,7 @@
from pandas.compat import (lrange, lmap, lzip, text_type, string_types, range,
zip, BytesIO)
from pandas.core.base import StringMixin
-from pandas.core.categorical import Categorical
+from pandas.core.arrays import Categorical
from pandas.core.dtypes.common import (is_categorical_dtype, _ensure_object,
is_datetime64_dtype)
from pandas.core.frame import DataFrame
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 821c7858c7a5c..4a10ed6e7402c 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-
+import sys
from warnings import catch_warnings
import pytest
@@ -249,3 +249,13 @@ def test_deprecation_cdaterange(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
cdate_range('2017-01-01', '2017-12-31')
+
+
+class TestCategoricalMove(object):
+
+ def test_categorical_move(self):
+ # May have been cached by another import, e.g. pickle tests.
+ sys.modules.pop("pandas.core.categorical", None)
+
+ with tm.assert_produces_warning(FutureWarning):
+ from pandas.core.categorical import Categorical # noqa
diff --git a/pandas/tests/categorical/test_api.py b/pandas/tests/categorical/test_api.py
index 0af2857091b74..ad5b78b36438b 100644
--- a/pandas/tests/categorical/test_api.py
+++ b/pandas/tests/categorical/test_api.py
@@ -7,7 +7,7 @@
import pandas.util.testing as tm
from pandas import Categorical, CategoricalIndex, Index, Series, DataFrame
-from pandas.core.categorical import _recode_for_categories
+from pandas.core.arrays.categorical import _recode_for_categories
from pandas.tests.categorical.common import TestCategorical
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 73cc87855acbd..cf8698bc5ed5e 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -511,8 +511,7 @@ def test_cat_accessor(self):
def test_cat_accessor_api(self):
# GH 9322
- from pandas.core.categorical import CategoricalAccessor
-
+ from pandas.core.arrays.categorical import CategoricalAccessor
assert Series.cat is CategoricalAccessor
s = Series(list('aabbcde')).astype('category')
assert isinstance(s.cat, CategoricalAccessor)
| Prep for https://github.com/pandas-dev/pandas/pull/19268
| https://api.github.com/repos/pandas-dev/pandas/pulls/19269 | 2018-01-16T16:06:55Z | 2018-01-18T17:31:43Z | 2018-01-18T17:31:42Z | 2018-01-18T17:31:46Z |
Array Interface and Categorical internals Refactor | diff --git a/pandas/core/arrays/__init__.py b/pandas/core/arrays/__init__.py
index ee32b12f0e712..f8adcf520c15b 100644
--- a/pandas/core/arrays/__init__.py
+++ b/pandas/core/arrays/__init__.py
@@ -1 +1,2 @@
+from .base import ExtensionArray # noqa
from .categorical import Categorical # noqa
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
new file mode 100644
index 0000000000000..1556b653819a6
--- /dev/null
+++ b/pandas/core/arrays/base.py
@@ -0,0 +1,247 @@
+"""An interface for extending pandas with custom arrays."""
+from pandas.errors import AbstractMethodError
+
+_not_implemented_message = "{} does not implement {}."
+
+
+class ExtensionArray(object):
+ """Abstract base class for custom 1-D array types.
+
+ pandas will recognize instances of this class as proper arrays
+ with a custom type and will not attempt to coerce them to objects. They
+ may be stored directly inside a :class:`DataFrame` or :class:`Series`.
+
+ Notes
+ -----
+ The interface includes the following abstract methods that must be
+ implemented by subclasses:
+
+ * __getitem__
+ * __len__
+ * dtype
+ * nbytes
+ * isna
+ * take
+ * copy
+ * _formatting_values
+ * _concat_same_type
+
+ Some additional methods are required to satisfy pandas' internal, private
+ block API.
+
+ * _concat_same_type
+ * _can_hold_na
+
+ This class does not inherit from 'abc.ABCMeta' for performance reasons.
+ Methods and properties required by the interface raise
+ ``pandas.errors.AbstractMethodError`` and no ``register`` method is
+ provided for registering virtual subclasses.
+
+ ExtensionArrays are limited to 1 dimension.
+
+ They may be backed by none, one, or many NumPy ararys. For example,
+ ``pandas.Categorical`` is an extension array backed by two arrays,
+ one for codes and one for categories. An array of IPv6 address may
+ be backed by a NumPy structured array with two fields, one for the
+ lower 64 bits and one for the upper 64 bits. Or they may be backed
+ by some other storage type, like Python lists. Pandas makes no
+ assumptions on how the data are stored, just that it can be converted
+ to a NumPy array.
+
+ Extension arrays should be able to be constructed with instances of
+ the class, i.e. ``ExtensionArray(extension_array)`` should return
+ an instance, not error.
+
+ Additionally, certain methods and interfaces are required for proper
+ this array to be properly stored inside a ``DataFrame`` or ``Series``.
+ """
+ # ------------------------------------------------------------------------
+ # Must be a Sequence
+ # ------------------------------------------------------------------------
+ def __getitem__(self, item):
+ # type (Any) -> Any
+ """Select a subset of self.
+
+ Parameters
+ ----------
+ item : int, slice, or ndarray
+ * int: The position in 'self' to get.
+
+ * slice: A slice object, where 'start', 'stop', and 'step' are
+ integers or None
+
+ * ndarray: A 1-d boolean NumPy ndarray the same length as 'self'
+
+ Returns
+ -------
+ item : scalar or ExtensionArray
+
+ Notes
+ -----
+ For scalar ``item``, return a scalar value suitable for the array's
+ type. This should be an instance of ``self.dtype.type``.
+
+ For slice ``key``, return an instance of ``ExtensionArray``, even
+ if the slice is length 0 or 1.
+
+ For a boolean mask, return an instance of ``ExtensionArray``, filtered
+ to the values where ``item`` is True.
+ """
+ raise AbstractMethodError(self)
+
+ def __setitem__(self, key, value):
+ # type: (Any, Any) -> None
+ raise NotImplementedError(_not_implemented_message.format(
+ type(self), '__setitem__')
+ )
+
+ def __len__(self):
+ """Length of this array
+
+ Returns
+ -------
+ length : int
+ """
+ # type: () -> int
+ raise AbstractMethodError(self)
+
+ # ------------------------------------------------------------------------
+ # Required attributes
+ # ------------------------------------------------------------------------
+ @property
+ def dtype(self):
+ # type: () -> ExtensionDtype
+ """An instance of 'ExtensionDtype'."""
+ raise AbstractMethodError(self)
+
+ @property
+ def shape(self):
+ # type: () -> Tuple[int, ...]
+ return (len(self),)
+
+ @property
+ def ndim(self):
+ # type: () -> int
+ """Extension Arrays are only allowed to be 1-dimensional."""
+ return 1
+
+ @property
+ def nbytes(self):
+ # type: () -> int
+ """The number of bytes needed to store this object in memory.
+
+ If this is expensive to compute, return an approximate lower bound
+ on the number of bytes needed.
+ """
+ raise AbstractMethodError(self)
+
+ # ------------------------------------------------------------------------
+ # Additional Methods
+ # ------------------------------------------------------------------------
+ def isna(self):
+ # type: () -> np.ndarray
+ """Boolean NumPy array indicating if each value is missing.
+
+ This should return a 1-D array the same length as 'self'.
+ """
+ raise AbstractMethodError(self)
+
+ # ------------------------------------------------------------------------
+ # Indexing methods
+ # ------------------------------------------------------------------------
+ def take(self, indexer, allow_fill=True, fill_value=None):
+ # type: (Sequence[int], bool, Optional[Any]) -> ExtensionArray
+ """Take elements from an array.
+
+ Parameters
+ ----------
+ indexer : sequence of integers
+ indices to be taken. -1 is used to indicate values
+ that are missing.
+ allow_fill : bool, default True
+ If False, indexer is assumed to contain no -1 values so no filling
+ will be done. This short-circuits computation of a mask. Result is
+ undefined if allow_fill == False and -1 is present in indexer.
+ fill_value : any, default None
+ Fill value to replace -1 values with. By default, this uses
+ the missing value sentinel for this type, ``self._fill_value``.
+
+ Notes
+ -----
+ This should follow pandas' semantics where -1 indicates missing values.
+ Positions where indexer is ``-1`` should be filled with the missing
+ value for this type.
+
+ This is called by ``Series.__getitem__``, ``.loc``, ``iloc``, when the
+ indexer is a sequence of values.
+
+ Examples
+ --------
+ Suppose the extension array somehow backed by a NumPy structured array
+ and that the underlying structured array is stored as ``self.data``.
+ Then ``take`` may be written as
+
+ .. code-block:: python
+
+ def take(self, indexer, allow_fill=True, fill_value=None):
+ mask = indexer == -1
+ result = self.data.take(indexer)
+ result[mask] = self._fill_value
+ return type(self)(result)
+ """
+ raise AbstractMethodError(self)
+
+ def copy(self, deep=False):
+ # type: (bool) -> ExtensionArray
+ """Return a copy of the array.
+
+ Parameters
+ ----------
+ deep : bool, default False
+ Also copy the underlying data backing this array.
+
+ Returns
+ -------
+ ExtensionArray
+ """
+ raise AbstractMethodError(self)
+
+ # ------------------------------------------------------------------------
+ # Block-related methods
+ # ------------------------------------------------------------------------
+ @property
+ def _fill_value(self):
+ # type: () -> Any
+ """The missing value for this type, e.g. np.nan"""
+ return None
+
+ def _formatting_values(self):
+ # type: () -> np.ndarray
+ # At the moment, this has to be an array since we use result.dtype
+ """An array of values to be printed in, e.g. the Series repr"""
+ raise AbstractMethodError(self)
+
+ @classmethod
+ def _concat_same_type(cls, to_concat):
+ # type: (Sequence[ExtensionArray]) -> ExtensionArray
+ """Concatenate multiple array
+
+ Parameters
+ ----------
+ to_concat : sequence of this type
+
+ Returns
+ -------
+ ExtensionArray
+ """
+ raise AbstractMethodError(cls)
+
+ def _can_hold_na(self):
+ # type: () -> bool
+ """Whether your array can hold missing values. True by default.
+
+ Notes
+ -----
+ Setting this to false will optimize some operations like fillna.
+ """
+ return True
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index b50e01b0fb55a..62c6a6b16cbe9 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -43,6 +43,8 @@
from pandas.util._validators import validate_bool_kwarg
from pandas.core.config import get_option
+from .base import ExtensionArray
+
def _cat_compare_op(op):
def f(self, other):
@@ -148,7 +150,7 @@ def _maybe_to_categorical(array):
"""
-class Categorical(PandasObject):
+class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
@@ -2130,6 +2132,20 @@ def repeat(self, repeats, *args, **kwargs):
return self._constructor(values=codes, categories=self.categories,
ordered=self.ordered, fastpath=True)
+ # Implement the ExtensionArray interface
+ @property
+ def _can_hold_na(self):
+ return True
+
+ @classmethod
+ def _concat_same_type(self, to_concat):
+ from pandas.core.dtypes.concat import _concat_categorical
+
+ return _concat_categorical(to_concat)
+
+ def _formatting_values(self):
+ return self
+
# The Series.cat accessor
diff --git a/pandas/core/common.py b/pandas/core/common.py
index e606be3cc2a23..6748db825acf0 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -25,7 +25,8 @@
# compat
from pandas.errors import ( # noqa
- PerformanceWarning, UnsupportedFunctionCall, UnsortedIndexError)
+ PerformanceWarning, UnsupportedFunctionCall, UnsortedIndexError,
+ AbstractMethodError)
# back-compat of public API
# deprecate these functions
@@ -88,19 +89,6 @@ class SettingWithCopyWarning(Warning):
pass
-class AbstractMethodError(NotImplementedError):
- """Raise this error instead of NotImplementedError for abstract methods
- while keeping compatibility with Python 2 and Python 3.
- """
-
- def __init__(self, class_instance):
- self.class_instance = class_instance
-
- def __str__(self):
- msg = "This method must be defined in the concrete class of {name}"
- return (msg.format(name=self.class_instance.__class__.__name__))
-
-
def flatten(l):
"""Flatten an arbitrarily nested sequence.
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
new file mode 100644
index 0000000000000..c7c5378801f02
--- /dev/null
+++ b/pandas/core/dtypes/base.py
@@ -0,0 +1,129 @@
+"""Extend pandas with custom array types"""
+from pandas.errors import AbstractMethodError
+
+
+class ExtensionDtype(object):
+ """A custom data type, to be paired with an ExtensionArray.
+
+ Notes
+ -----
+ The interface includes the following abstract methods that must
+ be implemented by subclasses:
+
+ * type
+ * name
+ * construct_from_string
+
+ This class does not inherit from 'abc.ABCMeta' for performance reasons.
+ Methods and properties required by the interface raise
+ ``pandas.errors.AbstractMethodError`` and no ``register`` method is
+ provided for registering virtual subclasses.
+ """
+
+ def __str__(self):
+ return self.name
+
+ @property
+ def type(self):
+ # type: () -> type
+ """The scalar type for the array, e.g. ``int``
+
+ It's expected ``ExtensionArray[item]`` returns an instance
+ of ``ExtensionDtype.type`` for scalar ``item``.
+ """
+ raise AbstractMethodError(self)
+
+ @property
+ def kind(self):
+ # type () -> str
+ """A character code (one of 'biufcmMOSUV'), default 'O'
+
+ This should match the NumPy dtype used when the array is
+ converted to an ndarray, which is probably 'O' for object if
+ the extension type cannot be represented as a built-in NumPy
+ type.
+
+ See Also
+ --------
+ numpy.dtype.kind
+ """
+ return 'O'
+
+ @property
+ def name(self):
+ # type: () -> str
+ """A string identifying the data type.
+
+ Will be used for display in, e.g. ``Series.dtype``
+ """
+ raise AbstractMethodError(self)
+
+ @property
+ def names(self):
+ # type: () -> Optional[List[str]]
+ """Ordered list of field names, or None if there are no fields.
+
+ This is for compatibility with NumPy arrays, and may be removed in the
+ future.
+ """
+ return None
+
+ @classmethod
+ def construct_from_string(cls, string):
+ """Attempt to construct this type from a string.
+
+ Parameters
+ ----------
+ string : str
+
+ Returns
+ -------
+ self : instance of 'cls'
+
+ Raises
+ ------
+ TypeError
+ If a class cannot be constructed from this 'string'.
+
+ Examples
+ --------
+ If the extension dtype can be constructed without any arguments,
+ the following may be an adequate implementation.
+
+ >>> @classmethod
+ ... def construct_from_string(cls, string)
+ ... if string == cls.name:
+ ... return cls()
+ ... else:
+ ... raise TypeError("Cannot construct a '{}' from "
+ ... "'{}'".format(cls, string))
+ """
+ raise AbstractMethodError(cls)
+
+ @classmethod
+ def is_dtype(cls, dtype):
+ """Check if we match 'dtype'
+
+ Parameters
+ ----------
+ dtype : str or dtype
+
+ Returns
+ -------
+ is_dtype : bool
+
+ Notes
+ -----
+ The default implementation is True if
+
+ 1. ``cls.construct_from_string(dtype)`` is an instance
+ of ``cls``.
+ 2. 'dtype' is ``cls`` or a subclass of ``cls``.
+ """
+ if isinstance(dtype, str):
+ try:
+ return isinstance(cls.construct_from_string(dtype), cls)
+ except TypeError:
+ return False
+ else:
+ return issubclass(dtype, cls)
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index dca9a5fde0d74..c66e7fcfc6978 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1685,6 +1685,35 @@ def is_extension_type(arr):
return False
+def is_extension_array_dtype(arr_or_dtype):
+ """Check if an object is a pandas extension array type.
+
+ Parameters
+ ----------
+ arr_or_dtype : object
+
+ Returns
+ -------
+ bool
+
+ Notes
+ -----
+ This checks whether an object implements the pandas extension
+ array interface. In pandas, this includes:
+
+ * Categorical
+
+ Third-party libraries may implement arrays or types satisfying
+ this interface as well.
+ """
+ from pandas.core.arrays import ExtensionArray
+
+ # we want to unpack series, anything else?
+ if isinstance(arr_or_dtype, ABCSeries):
+ arr_or_dtype = arr_or_dtype._values
+ return isinstance(arr_or_dtype, (ExtensionDtype, ExtensionArray))
+
+
def is_complex_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of a complex dtype.
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 1eb87aa99fd1e..d8d3a96992757 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -5,15 +5,15 @@
from pandas import compat
from pandas.core.dtypes.generic import ABCIndexClass, ABCCategoricalIndex
+from .base import ExtensionDtype
-class ExtensionDtype(object):
+
+class PandasExtensionDtype(ExtensionDtype):
"""
A np.dtype duck-typed class, suitable for holding a custom dtype.
THIS IS NOT A REAL NUMPY DTYPE
"""
- name = None
- names = None
type = None
subdtype = None
kind = None
@@ -108,7 +108,7 @@ class CategoricalDtypeType(type):
pass
-class CategoricalDtype(ExtensionDtype):
+class CategoricalDtype(PandasExtensionDtype):
"""
Type for categorical data with the categories and orderedness
@@ -387,7 +387,7 @@ class DatetimeTZDtypeType(type):
pass
-class DatetimeTZDtype(ExtensionDtype):
+class DatetimeTZDtype(PandasExtensionDtype):
"""
A np.dtype duck-typed class, suitable for holding a custom datetime with tz
@@ -501,8 +501,7 @@ class PeriodDtypeType(type):
pass
-class PeriodDtype(ExtensionDtype):
- __metaclass__ = PeriodDtypeType
+class PeriodDtype(PandasExtensionDtype):
"""
A Period duck-typed class, suitable for holding a period with freq dtype.
@@ -619,8 +618,7 @@ class IntervalDtypeType(type):
pass
-class IntervalDtype(ExtensionDtype):
- __metaclass__ = IntervalDtypeType
+class IntervalDtype(PandasExtensionDtype):
"""
A Interval duck-typed class, suitable for holding an interval
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index f3e5e4c99a899..cef5b776eff66 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -33,6 +33,7 @@
is_datetimelike_v_numeric,
is_float_dtype, is_numeric_dtype,
is_numeric_v_string_like, is_extension_type,
+ is_extension_array_dtype,
is_list_like,
is_re,
is_re_compilable,
@@ -61,8 +62,9 @@
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
-from pandas.core.arrays.categorical import Categorical, _maybe_to_categorical
+from pandas.core.arrays import Categorical
from pandas.core.indexes.datetimes import DatetimeIndex
+from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.io.formats.printing import pprint_thing
import pandas.core.missing as missing
@@ -103,24 +105,58 @@ class Block(PandasObject):
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
- _holder = None
_concatenator = staticmethod(np.concatenate)
def __init__(self, values, placement, ndim=None):
- if ndim is None:
- ndim = values.ndim
- elif values.ndim != ndim:
- raise ValueError('Wrong number of dimensions')
- self.ndim = ndim
-
+ self.ndim = self._check_ndim(values, ndim)
self.mgr_locs = placement
self.values = values
- if ndim and len(self.mgr_locs) != len(self.values):
+ if (self._validate_ndim and self.ndim and
+ len(self.mgr_locs) != len(self.values)):
raise ValueError(
'Wrong number of items passed {val}, placement implies '
'{mgr}'.format(val=len(self.values), mgr=len(self.mgr_locs)))
+ def _check_ndim(self, values, ndim):
+ """ndim inference and validation.
+
+ Infers ndim from 'values' if not provided to __init__.
+ Validates that values.ndim and ndim are consistent if and only if
+ the class variable '_validate_ndim' is True.
+
+ Parameters
+ ----------
+ values : array-like
+ ndim : int or None
+
+ Returns
+ -------
+ ndim : int
+
+ Raises
+ ------
+ ValueError : the number of dimensions do not match
+ """
+ if ndim is None:
+ ndim = values.ndim
+
+ if self._validate_ndim and values.ndim != ndim:
+ msg = ("Wrong number of dimensions. values.ndim != ndim "
+ "[{} != {}]")
+ raise ValueError(msg.format(values.ndim, ndim))
+
+ return ndim
+
+ @property
+ def _holder(self):
+ """The array-like that can hold the underlying values.
+
+ None for 'Block', overridden by subclasses that don't
+ use an ndarray.
+ """
+ return None
+
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@@ -279,7 +315,6 @@ def reshape_nd(self, labels, shape, ref_items, mgr=None):
return a new block that is transformed to a nd block
"""
-
return _block2d_to_blocknd(values=self.get_values().T,
placement=self.mgr_locs, shape=shape,
labels=labels, ref_items=ref_items)
@@ -535,15 +570,20 @@ def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs):
def _astype(self, dtype, copy=False, errors='raise', values=None,
klass=None, mgr=None, **kwargs):
- """
- Coerce to the new type
+ """Coerce to the new type
+ Parameters
+ ----------
dtype : str, dtype convertible
copy : boolean, default False
copy if indicated
errors : str, {'raise', 'ignore'}, default 'ignore'
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
+
+ Returns
+ -------
+ Block
"""
errors_legal_values = ('raise', 'ignore')
@@ -1671,27 +1711,28 @@ class NonConsolidatableMixIn(object):
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
- _holder = None
def __init__(self, values, placement, ndim=None):
+ """Initialize a non-consolidatable block.
- # Placement must be converted to BlockPlacement via property setter
- # before ndim logic, because placement may be a slice which doesn't
- # have a length.
- self.mgr_locs = placement
+ 'ndim' may be inferred from 'placement'.
- # kludgetastic
+ This will call continue to call __init__ for the other base
+ classes mixed in with this Mixin.
+ """
+ # Placement must be converted to BlockPlacement so that we can check
+ # its length
+ if not isinstance(placement, BlockPlacement):
+ placement = BlockPlacement(placement)
+
+ # Maybe infer ndim from placement
if ndim is None:
- if len(self.mgr_locs) != 1:
+ if len(placement) != 1:
ndim = 1
else:
ndim = 2
- self.ndim = ndim
-
- if not isinstance(values, self._holder):
- raise TypeError("values must be {0}".format(self._holder.__name__))
-
- self.values = values
+ super(NonConsolidatableMixIn, self).__init__(values, placement,
+ ndim=ndim)
@property
def shape(self):
@@ -1742,7 +1783,7 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0,
Returns
-------
- a new block(s), the result of the putmask
+ a new block, the result of the putmask
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
@@ -1800,6 +1841,92 @@ def _unstack(self, unstacker_func, new_columns):
return blocks, mask
+class ExtensionBlock(NonConsolidatableMixIn, Block):
+ """Block for holding extension types.
+
+ Notes
+ -----
+ This holds all 3rd-party extension array types. It's also the immediate
+ parent class for our internal extension types' blocks, CategoricalBlock.
+
+ ExtensionArrays are limited to 1-D.
+ """
+ @property
+ def _holder(self):
+ # For extension blocks, the holder is values-dependent.
+ return type(self.values)
+
+ @property
+ def is_view(self):
+ """Extension arrays are never treated as views."""
+ return False
+
+ def get_values(self, dtype=None):
+ # ExtensionArrays must be iterable, so this works.
+ values = np.asarray(self.values)
+ if values.ndim == self.ndim - 1:
+ values = values.reshape((1,) + values.shape)
+ return values
+
+ def to_dense(self):
+ return np.asarray(self.values)
+
+ def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
+ """
+ Take values according to indexer and return them as a block.
+ """
+ if fill_tuple is None:
+ fill_value = None
+ else:
+ fill_value = fill_tuple[0]
+
+ # axis doesn't matter; we are really a single-dim object
+ # but are passed the axis depending on the calling routing
+ # if its REALLY axis 0, then this will be a reindex and not a take
+ new_values = self.values.take(indexer, fill_value=fill_value)
+
+ # if we are a 1-dim object, then always place at 0
+ if self.ndim == 1:
+ new_mgr_locs = [0]
+ else:
+ if new_mgr_locs is None:
+ new_mgr_locs = self.mgr_locs
+
+ return self.make_block_same_class(new_values, new_mgr_locs)
+
+ def _can_hold_element(self, element):
+ # XXX: We may need to think about pushing this onto the array.
+ # We're doing the same as CategoricalBlock here.
+ return True
+
+ def _slice(self, slicer):
+ """ return a slice of my values """
+
+ # slice the category
+ # return same dims as we currently have
+
+ if isinstance(slicer, tuple) and len(slicer) == 2:
+ if not com.is_null_slice(slicer[0]):
+ raise AssertionError("invalid slicing for a 1-ndim "
+ "categorical")
+ slicer = slicer[1]
+
+ return self.values[slicer]
+
+ def formatting_values(self):
+ return self.values._formatting_values()
+
+ def concat_same_type(self, to_concat, placement=None):
+ """
+ Concatenate list of single blocks of the same type.
+ """
+ values = self._holder._concat_same_type(
+ [blk.values for blk in to_concat])
+ placement = placement or slice(0, len(values), 1)
+ return self.make_block_same_class(values, ndim=self.ndim,
+ placement=placement)
+
+
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
@@ -1905,6 +2032,11 @@ def should_store(self, value):
class DatetimeLikeBlockMixin(object):
+ """Mixin class for DatetimeBlock and DatetimeTZBlock."""
+
+ @property
+ def _holder(self):
+ return DatetimeIndex
@property
def _na_value(self):
@@ -1937,6 +2069,10 @@ def __init__(self, values, placement, ndim=None):
super(TimeDeltaBlock, self).__init__(values,
placement=placement, ndim=ndim)
+ @property
+ def _holder(self):
+ return TimedeltaIndex
+
@property
def _box_func(self):
return lambda x: tslib.Timedelta(x, unit='ns')
@@ -2312,30 +2448,24 @@ def re_replacer(s):
return block
-class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
+class CategoricalBlock(ExtensionBlock):
__slots__ = ()
is_categorical = True
_verify_integrity = True
_can_hold_na = True
- _holder = Categorical
_concatenator = staticmethod(_concat._concat_categorical)
def __init__(self, values, placement, ndim=None):
+ from pandas.core.arrays.categorical import _maybe_to_categorical
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(_maybe_to_categorical(values),
- placement=placement, ndim=ndim)
+ placement=placement,
+ ndim=ndim)
@property
- def is_view(self):
- """ I am never a view """
- return False
-
- def to_dense(self):
- return self.values.to_dense().view()
-
- def convert(self, copy=True, **kwargs):
- return self.copy() if copy else self
+ def _holder(self):
+ return Categorical
@property
def array_dtype(self):
@@ -2344,13 +2474,6 @@ def array_dtype(self):
"""
return np.object_
- def _slice(self, slicer):
- """ return a slice of my values """
-
- # slice the category
- # return same dims as we currently have
- return self.values._slice(slicer)
-
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
@@ -2387,28 +2510,11 @@ def shift(self, periods, axis=0, mgr=None):
return self.make_block_same_class(values=self.values.shift(periods),
placement=self.mgr_locs)
- def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
- """
- Take values according to indexer and return them as a block.bb
- """
- if fill_tuple is None:
- fill_value = None
- else:
- fill_value = fill_tuple[0]
-
- # axis doesn't matter; we are really a single-dim object
- # but are passed the axis depending on the calling routing
- # if its REALLY axis 0, then this will be a reindex and not a take
- new_values = self.values.take_nd(indexer, fill_value=fill_value)
-
- # if we are a 1-dim object, then always place at 0
- if self.ndim == 1:
- new_mgr_locs = [0]
- else:
- if new_mgr_locs is None:
- new_mgr_locs = self.mgr_locs
-
- return self.make_block_same_class(new_values, new_mgr_locs)
+ def to_dense(self):
+ # Categorical.get_values returns a DatetimeIndex for datetime
+ # categories, so we can't simply use `np.asarray(self.values)` like
+ # other types.
+ return self.values.get_values()
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
@@ -2427,6 +2533,15 @@ def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
def concat_same_type(self, to_concat, placement=None):
"""
Concatenate list of single blocks of the same type.
+
+ Note that this CategoricalBlock._concat_same_type *may* not
+ return a CategoricalBlock. When the categories in `to_concat`
+ differ, this will return an object ndarray.
+
+ If / when we decide we don't like that behavior:
+
+ 1. Change Categorical._concat_same_type to use union_categoricals
+ 2. Delete this method.
"""
values = self._concatenator([blk.values for blk in to_concat],
axis=self.ndim - 1)
@@ -2442,12 +2557,29 @@ class DatetimeBlock(DatetimeLikeBlockMixin, Block):
_can_hold_na = True
def __init__(self, values, placement, ndim=None):
- if values.dtype != _NS_DTYPE:
- values = conversion.ensure_datetime64ns(values)
-
+ values = self._maybe_coerce_values(values)
super(DatetimeBlock, self).__init__(values,
placement=placement, ndim=ndim)
+ def _maybe_coerce_values(self, values):
+ """Input validation for values passed to __init__. Ensure that
+ we have datetime64ns, coercing if nescessary.
+
+ Parametetrs
+ -----------
+ values : array-like
+ Must be convertable to datetime64
+
+ Returns
+ -------
+ values : ndarray[datetime64ns]
+
+ Overridden by DatetimeTZBlock.
+ """
+ if values.dtype != _NS_DTYPE:
+ values = conversion.ensure_datetime64ns(values)
+ return values
+
def _astype(self, dtype, mgr=None, **kwargs):
"""
these automatically copy, so copy=True has no effect
@@ -2573,12 +2705,37 @@ def set(self, locs, values, check=False):
class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock):
""" implement a datetime64 block with a tz attribute """
__slots__ = ()
- _holder = DatetimeIndex
_concatenator = staticmethod(_concat._concat_datetime)
is_datetimetz = True
def __init__(self, values, placement, ndim=2, dtype=None):
+ # XXX: This will end up calling _maybe_coerce_values twice
+ # when dtype is not None. It's relatively cheap (just an isinstance)
+ # but it'd nice to avoid.
+ #
+ # If we can remove dtype from __init__, and push that conversion
+ # push onto the callers, then we can remove this entire __init__
+ # and just use DatetimeBlock's.
+ if dtype is not None:
+ values = self._maybe_coerce_values(values, dtype=dtype)
+ super(DatetimeTZBlock, self).__init__(values, placement=placement,
+ ndim=ndim)
+
+ def _maybe_coerce_values(self, values, dtype=None):
+ """Input validation for values passed to __init__. Ensure that
+ we have datetime64TZ, coercing if nescessary.
+ Parametetrs
+ -----------
+ values : array-like
+ Must be convertable to datetime64
+ dtype : string or DatetimeTZDtype, optional
+ Does a shallow copy to this tz
+
+ Returns
+ -------
+ values : ndarray[datetime64ns]
+ """
if not isinstance(values, self._holder):
values = self._holder(values)
@@ -2590,8 +2747,7 @@ def __init__(self, values, placement, ndim=2, dtype=None):
if values.tz is None:
raise ValueError("cannot create a DatetimeTZBlock without a tz")
- super(DatetimeTZBlock, self).__init__(values, placement=placement,
- ndim=ndim)
+ return values
def copy(self, deep=True, mgr=None):
""" copy constructor """
@@ -2731,9 +2887,19 @@ class SparseBlock(NonConsolidatableMixIn, Block):
_box_to_block_values = False
_can_hold_na = True
_ftype = 'sparse'
- _holder = SparseArray
_concatenator = staticmethod(_concat._concat_sparse)
+ def __init__(self, values, placement, ndim=None):
+ # Ensure that we have the underlying SparseArray here...
+ if isinstance(values, ABCSeries):
+ values = values.values
+ assert isinstance(values, SparseArray)
+ super(SparseBlock, self).__init__(values, placement, ndim=ndim)
+
+ @property
+ def _holder(self):
+ return SparseArray
+
@property
def shape(self):
return (len(self.mgr_locs), self.sp_index.length)
@@ -2907,6 +3073,8 @@ def get_block_type(values, dtype=None):
cls = BoolBlock
elif is_categorical(values):
cls = CategoricalBlock
+ elif is_extension_array_dtype(values):
+ cls = ExtensionBlock
else:
cls = ObjectBlock
return cls
@@ -4660,6 +4828,19 @@ def form_blocks(arrays, names, axes):
for i, _, array in items_dict['CategoricalBlock']]
blocks.extend(cat_blocks)
+ if len(items_dict['ExtensionBlock']):
+
+ external_blocks = []
+ for i, _, array in items_dict['ExtensionBlock']:
+ if isinstance(array, ABCSeries):
+ array = array.values
+ # Allow our internal arrays to chose their block type.
+ block_type = getattr(array, '_block_type', ExtensionBlock)
+ external_blocks.append(
+ make_block(array, klass=block_type,
+ fastpath=True, placement=[i]))
+ blocks.extend(external_blocks)
+
if len(extra_locs):
shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index 22b6d33be9d38..af4e83f506257 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -77,3 +77,26 @@ class NullFrequencyError(ValueError):
class AccessorRegistrationWarning(Warning):
"""Warning for attribute conflicts in accessor registration."""
+
+
+class AbstractMethodError(NotImplementedError):
+ """Raise this error instead of NotImplementedError for abstract methods
+ while keeping compatibility with Python 2 and Python 3.
+ """
+
+ def __init__(self, class_instance, methodtype='method'):
+ types = {'method', 'classmethod', 'staticmethod', 'property'}
+ if methodtype not in types:
+ msg = 'methodtype must be one of {}, got {} instead.'.format(
+ methodtype, types)
+ raise ValueError(msg)
+ self.methodtype = methodtype
+ self.class_instance = class_instance
+
+ def __str__(self):
+ if self.methodtype == 'classmethod':
+ name = self.class_instance.__name__
+ else:
+ name = self.class_instance.__class__.__name__
+ msg = "This {methodtype} must be defined in the concrete class {name}"
+ return (msg.format(methodtype=self.methodtype, name=name))
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index d800a7b92b559..eca4dd4cf2106 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -10,12 +10,14 @@
Series, Categorical, CategoricalIndex, IntervalIndex, date_range)
from pandas.compat import string_types
+from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype, PeriodDtype,
- IntervalDtype, CategoricalDtype)
+ IntervalDtype, CategoricalDtype, ExtensionDtype)
from pandas.core.dtypes.common import (
is_categorical_dtype, is_categorical,
is_datetime64tz_dtype, is_datetimetz,
+ is_extension_array_dtype,
is_period_dtype, is_period,
is_dtype_equal, is_datetime64_ns_dtype,
is_datetime64_dtype, is_interval_dtype,
@@ -742,3 +744,31 @@ def test_categorical_categories(self):
tm.assert_index_equal(c1.categories, pd.Index(['a', 'b']))
c1 = CategoricalDtype(CategoricalIndex(['a', 'b']))
tm.assert_index_equal(c1.categories, pd.Index(['a', 'b']))
+
+
+class DummyArray(ExtensionArray):
+ pass
+
+
+class DummyDtype(ExtensionDtype):
+ pass
+
+
+class TestExtensionArrayDtype(object):
+
+ @pytest.mark.parametrize('values', [
+ pd.Categorical([]),
+ pd.Categorical([]).dtype,
+ pd.Series(pd.Categorical([])),
+ DummyDtype(),
+ DummyArray(),
+ ])
+ def test_is_extension_array_dtype(self, values):
+ assert is_extension_array_dtype(values)
+
+ @pytest.mark.parametrize('values', [
+ np.array([]),
+ pd.Series(np.array([])),
+ ])
+ def test_is_not_extension_array_dtype(self, values):
+ assert not is_extension_array_dtype(values)
diff --git a/pandas/tests/internals/test_external_block.py b/pandas/tests/internals/test_external_block.py
index 729ee0093b6dc..2487363df8f99 100644
--- a/pandas/tests/internals/test_external_block.py
+++ b/pandas/tests/internals/test_external_block.py
@@ -5,12 +5,12 @@
import pandas as pd
from pandas.core.internals import (
- Block, BlockManager, SingleBlockManager, NonConsolidatableMixIn)
+ BlockManager, SingleBlockManager, ExtensionBlock)
import pytest
-class CustomBlock(NonConsolidatableMixIn, Block):
+class CustomBlock(ExtensionBlock):
_holder = np.ndarray
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index e3490f465b24a..9338aba90d7cb 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -11,9 +11,8 @@
from distutils.version import LooseVersion
import itertools
from pandas import (Index, MultiIndex, DataFrame, DatetimeIndex,
- Series, Categorical)
+ Series, Categorical, TimedeltaIndex, SparseArray)
from pandas.compat import OrderedDict, lrange
-from pandas.core.sparse.array import SparseArray
from pandas.core.internals import (BlockPlacement, SingleBlockManager,
make_block, BlockManager)
import pandas.core.algorithms as algos
@@ -1263,9 +1262,30 @@ def test_binop_other(self, op, value, dtype):
assert_series_equal(result, expected)
+@pytest.mark.parametrize('typestr, holder', [
+ ('category', Categorical),
+ ('M8[ns]', DatetimeIndex),
+ ('M8[ns, US/Central]', DatetimeIndex),
+ ('m8[ns]', TimedeltaIndex),
+ ('sparse', SparseArray),
+])
+def test_holder(typestr, holder):
+ blk = create_block(typestr, [1])
+ assert blk._holder is holder
+
+
def test_deprecated_fastpath():
# GH#19265
values = np.random.rand(3, 3)
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
make_block(values, placement=np.arange(3), fastpath=True)
+
+
+def test_validate_ndim():
+ values = np.array([1.0, 2.0])
+ placement = slice(2)
+ msg = "Wrong number of dimensions. values.ndim != ndim \[1 != 2\]"
+
+ with tm.assert_raises_regex(ValueError, msg):
+ make_block(values, placement, ndim=2)
diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py
index 0b7948cc32d24..54f567bcd2a8c 100644
--- a/pandas/tests/sparse/frame/test_frame.py
+++ b/pandas/tests/sparse/frame/test_frame.py
@@ -574,6 +574,15 @@ def test_setitem_array(self):
self.frame['F'].reindex(index),
check_names=False)
+ def test_setitem_chained_no_consolidate(self):
+ # https://github.com/pandas-dev/pandas/pull/19268
+ # issuecomment-361696418
+ # chained setitem used to cause consolidation
+ sdf = pd.SparseDataFrame([[np.nan, 1], [2, np.nan]])
+ with pd.option_context('mode.chained_assignment', None):
+ sdf[0][1] = 2
+ assert len(sdf._data.blocks) == 2
+
def test_delitem(self):
A = self.frame['A']
C = self.frame['C']
diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py
index babf88ef1df8d..e2a142366a89e 100644
--- a/pandas/tests/test_errors.py
+++ b/pandas/tests/test_errors.py
@@ -4,6 +4,8 @@
from warnings import catch_warnings
import pandas # noqa
import pandas as pd
+from pandas.errors import AbstractMethodError
+import pandas.util.testing as tm
@pytest.mark.parametrize(
@@ -50,3 +52,30 @@ def test_error_rename():
raise ParserError()
except pd.parser.CParserError:
pass
+
+
+class Foo:
+ @classmethod
+ def classmethod(cls):
+ raise AbstractMethodError(cls, methodtype='classmethod')
+
+ @property
+ def property(self):
+ raise AbstractMethodError(self, methodtype='property')
+
+ def method(self):
+ raise AbstractMethodError(self)
+
+
+def test_AbstractMethodError_classmethod():
+ xpr = "This classmethod must be defined in the concrete class Foo"
+ with tm.assert_raises_regex(AbstractMethodError, xpr):
+ Foo.classmethod()
+
+ xpr = "This property must be defined in the concrete class Foo"
+ with tm.assert_raises_regex(AbstractMethodError, xpr):
+ Foo().property
+
+ xpr = "This method must be defined in the concrete class Foo"
+ with tm.assert_raises_regex(AbstractMethodError, xpr):
+ Foo().method()
| (edit post categorical-move)
Rebased on master. Summary of the changes from master:
Added the ExtensionArray class
Categorical subclasses ExtensionArray
Implements the new methods for the interface (all private. No public API
changes)
Adapted the ExtensionDtype class to be the public ABC
a. Subclass that with PandasExtensionClass that does non-interface things
like reprs, caching, etc.
b. All our custom dtypes inherit from PandasExtensionClass, so they implement
the interface.
Internals Changes:
a. Added an ExtensionBlock. This will be a parent for our current custom
blocks, and the block type for all 3rd-party extension arrays.
Added a new is_extension_array_dtype method. I think this is nescessary
for now, until we've handled DatetimeTZ.
This isn't really a test of whether extension arrays work yet, since we're
still using Categorical for everything. I have a followup PR that implements
an IntervalArray that requires additional changes to, e.g., the constructors
so that things work. But all the changes from core/internals.py required to
make that work are present here.
---
1. New class hierarchy in internals
Old:
```python
class CategoricalBlock(NonConsolidatableMixin, ObjectBlock):
pass
```
new:
```python
class ExtensionBlock(NonConsolidatableMixin, Block):
pass
class CategoricalBlock(ExtensionBlock):
pass
```
Figuring out which methods of `ObjectBlock` were required on `CategoricalBlock`
wasn't trivial for me. I probably messed some up.
I think that eventually we can remove `NonConsolidatableMixin`, with the idea
that all non-consolidatable blocks are blocks for extension dtypes? That's true
today anyway.
Followup PRs:
1. Making `core/arrays/period.py` and refactoring `PeriodIndex`
2. Making `core/arrays/interval.py` and refactoring `IntervalIndex`
3. Adding docs and generic tests like https://github.com/pandas-dev/pandas/pull/19174/files#diff-e448fe09dbe8aed468d89a4c90e65cff for our interface (once it's stabilized a bit). | https://api.github.com/repos/pandas-dev/pandas/pulls/19268 | 2018-01-16T15:25:09Z | 2018-02-02T21:34:21Z | 2018-02-02T21:34:21Z | 2018-02-02T21:38:06Z |
Remove unused fastpath kwarg from Blocks | diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 2177fa541b13e..d616ef441a31b 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -104,7 +104,7 @@ class Block(PandasObject):
_holder = None
_concatenator = staticmethod(np.concatenate)
- def __init__(self, values, placement, ndim=None, fastpath=False):
+ def __init__(self, values, placement, ndim=None):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
@@ -204,7 +204,7 @@ def array_dtype(self):
"""
return self.dtype
- def make_block(self, values, placement=None, ndim=None, **kwargs):
+ def make_block(self, values, placement=None, ndim=None):
"""
Create a new block, with type inference propagate any values that are
not specified
@@ -214,21 +214,20 @@ def make_block(self, values, placement=None, ndim=None, **kwargs):
if ndim is None:
ndim = self.ndim
- return make_block(values, placement=placement, ndim=ndim, **kwargs)
+ return make_block(values, placement=placement, ndim=ndim)
- def make_block_scalar(self, values, **kwargs):
+ def make_block_scalar(self, values):
"""
Create a ScalarBlock
"""
return ScalarBlock(values)
- def make_block_same_class(self, values, placement=None, fastpath=True,
- **kwargs):
+ def make_block_same_class(self, values, placement=None, ndim=None):
""" Wrap given values in a block of same type as self. """
if placement is None:
placement = self.mgr_locs
- return make_block(values, placement=placement, klass=self.__class__,
- fastpath=fastpath, **kwargs)
+ return make_block(values, placement=placement, ndim=ndim,
+ klass=self.__class__)
def __unicode__(self):
@@ -339,7 +338,7 @@ def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
new_values = algos.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
- return self.make_block(new_values, fastpath=True)
+ return self.make_block(new_values)
def iget(self, i):
return self.values[i]
@@ -458,7 +457,7 @@ def make_a_block(nv, ref_loc):
except (AttributeError, NotImplementedError):
pass
block = self.make_block(values=nv,
- placement=ref_loc, fastpath=True)
+ placement=ref_loc)
return block
# ndim == 1
@@ -517,7 +516,7 @@ def downcast(self, dtypes=None, mgr=None):
dtypes = 'infer'
nv = maybe_downcast_to_dtype(values, dtypes)
- return self.make_block(nv, fastpath=True)
+ return self.make_block(nv)
# ndim > 1
if dtypes is None:
@@ -908,7 +907,7 @@ def _is_empty_indexer(indexer):
# coerce and try to infer the dtypes of the result
values = self._try_coerce_and_cast_result(values, dtype)
- block = self.make_block(transf(values), fastpath=True)
+ block = self.make_block(transf(values))
return block
def putmask(self, mask, new, align=True, inplace=False, axis=0,
@@ -1024,7 +1023,7 @@ def f(m, v, i):
if transpose:
new_values = new_values.T
- return [self.make_block(new_values, fastpath=True)]
+ return [self.make_block(new_values)]
def coerce_to_target_dtype(self, other):
"""
@@ -1159,7 +1158,7 @@ def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
dtype=self.dtype)
values = self._try_coerce_result(values)
- blocks = [self.make_block(values, klass=self.__class__, fastpath=True)]
+ blocks = [self.make_block_same_class(values, ndim=self.ndim)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
@@ -1199,8 +1198,7 @@ def func(x):
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
- blocks = [self.make_block(interp_values, klass=self.__class__,
- fastpath=True)]
+ blocks = [self.make_block_same_class(interp_values)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
@@ -1244,7 +1242,7 @@ def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
def diff(self, n, axis=1, mgr=None):
""" return block for the diff of the values """
new_values = algos.diff(self.values, n, axis=axis)
- return [self.make_block(values=new_values, fastpath=True)]
+ return [self.make_block(values=new_values)]
def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods, possibly upcast """
@@ -1274,7 +1272,7 @@ def shift(self, periods, axis=0, mgr=None):
if f_ordered:
new_values = new_values.T
- return [self.make_block(new_values, fastpath=True)]
+ return [self.make_block(new_values)]
def eval(self, func, other, errors='raise', try_cast=False, mgr=None):
"""
@@ -1414,7 +1412,7 @@ def handle_error():
result = self._try_cast_result(result)
result = _block_shape(result, ndim=self.ndim)
- return [self.make_block(result, fastpath=True, )]
+ return [self.make_block(result)]
def where(self, other, cond, align=True, errors='raise',
try_cast=False, axis=0, transpose=False, mgr=None):
@@ -1694,7 +1692,7 @@ class NonConsolidatableMixIn(object):
_validate_ndim = False
_holder = None
- def __init__(self, values, placement, ndim=None, fastpath=False, **kwargs):
+ def __init__(self, values, placement, ndim=None):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
@@ -1951,12 +1949,12 @@ class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock):
_can_hold_na = True
is_numeric = False
- def __init__(self, values, placement, fastpath=False, **kwargs):
+ def __init__(self, values, placement, ndim=None):
if values.dtype != _TD_DTYPE:
values = conversion.ensure_timedelta64ns(values)
- super(TimeDeltaBlock, self).__init__(values, fastpath=True,
- placement=placement, **kwargs)
+ super(TimeDeltaBlock, self).__init__(values,
+ placement=placement, ndim=ndim)
@property
def _box_func(self):
@@ -2089,13 +2087,12 @@ class ObjectBlock(Block):
is_object = True
_can_hold_na = True
- def __init__(self, values, ndim=2, fastpath=False, placement=None,
- **kwargs):
+ def __init__(self, values, placement=None, ndim=2):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
- super(ObjectBlock, self).__init__(values, ndim=ndim, fastpath=fastpath,
- placement=placement, **kwargs)
+ super(ObjectBlock, self).__init__(values, ndim=ndim,
+ placement=placement)
@property
def is_bool(self):
@@ -2342,12 +2339,11 @@ class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
_holder = Categorical
_concatenator = staticmethod(_concat._concat_categorical)
- def __init__(self, values, placement, fastpath=False, **kwargs):
+ def __init__(self, values, placement, ndim=None):
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(_maybe_to_categorical(values),
- fastpath=True,
- placement=placement, **kwargs)
+ placement=placement, ndim=ndim)
@property
def is_view(self):
@@ -2464,12 +2460,12 @@ class DatetimeBlock(DatetimeLikeBlockMixin, Block):
is_datetime = True
_can_hold_na = True
- def __init__(self, values, placement, fastpath=False, **kwargs):
+ def __init__(self, values, placement, ndim=None):
if values.dtype != _NS_DTYPE:
values = conversion.ensure_datetime64ns(values)
- super(DatetimeBlock, self).__init__(values, fastpath=True,
- placement=placement, **kwargs)
+ super(DatetimeBlock, self).__init__(values,
+ placement=placement, ndim=ndim)
def _astype(self, dtype, mgr=None, **kwargs):
"""
@@ -2600,13 +2596,11 @@ class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock):
_concatenator = staticmethod(_concat._concat_datetime)
is_datetimetz = True
- def __init__(self, values, placement, ndim=2, **kwargs):
+ def __init__(self, values, placement, ndim=2, dtype=None):
if not isinstance(values, self._holder):
values = self._holder(values)
- dtype = kwargs.pop('dtype', None)
-
if dtype is not None:
if isinstance(dtype, compat.string_types):
dtype = DatetimeTZDtype.construct_from_string(dtype)
@@ -2616,7 +2610,7 @@ def __init__(self, values, placement, ndim=2, **kwargs):
raise ValueError("cannot create a DatetimeTZBlock without a tz")
super(DatetimeTZBlock, self).__init__(values, placement=placement,
- ndim=ndim, **kwargs)
+ ndim=ndim)
def copy(self, deep=True, mgr=None):
""" copy constructor """
@@ -2822,7 +2816,7 @@ def copy(self, deep=True, mgr=None):
def make_block_same_class(self, values, placement, sparse_index=None,
kind=None, dtype=None, fill_value=None,
- copy=False, fastpath=True, **kwargs):
+ copy=False, ndim=None):
""" return a new block """
if dtype is None:
dtype = values.dtype
@@ -2841,8 +2835,7 @@ def make_block_same_class(self, values, placement, sparse_index=None,
# won't take space since there's 0 items, plus it will preserve
# the dtype.
return self.make_block(np.empty(values.shape, dtype=dtype),
- placement,
- fastpath=True)
+ placement)
elif nitems > 1:
raise ValueError("Only 1-item 2d sparse blocks are supported")
else:
@@ -2851,7 +2844,7 @@ def make_block_same_class(self, values, placement, sparse_index=None,
new_values = SparseArray(values, sparse_index=sparse_index,
kind=kind or self.kind, dtype=dtype,
fill_value=fill_value, copy=copy)
- return self.make_block(new_values, fastpath=fastpath,
+ return self.make_block(new_values,
placement=placement)
def interpolate(self, method='pad', axis=0, inplace=False, limit=None,
@@ -2960,16 +2953,20 @@ def get_block_type(values, dtype=None):
def make_block(values, placement, klass=None, ndim=None, dtype=None,
- fastpath=False):
+ fastpath=None):
+ if fastpath is not None:
+ # GH#19265 pyarrow is passing this
+ warnings.warn("fastpath argument is deprecated, will be removed "
+ "in a future release.", DeprecationWarning)
if klass is None:
dtype = dtype or values.dtype
klass = get_block_type(values, dtype)
elif klass is DatetimeTZBlock and not is_datetimetz(values):
- return klass(values, ndim=ndim, fastpath=fastpath,
+ return klass(values, ndim=ndim,
placement=placement, dtype=dtype)
- return klass(values, ndim=ndim, fastpath=fastpath, placement=placement)
+ return klass(values, ndim=ndim, placement=placement)
# TODO: flexible with index=None and/or items=None
@@ -3029,7 +3026,7 @@ class BlockManager(PandasObject):
__slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated',
'_is_consolidated', '_blknos', '_blklocs']
- def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True):
+ def __init__(self, blocks, axes, do_integrity_check=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks)
@@ -3640,8 +3637,7 @@ def get_slice(self, slobj, axis=0):
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
- bm = self.__class__(new_blocks, new_axes, do_integrity_check=False,
- fastpath=True)
+ bm = self.__class__(new_blocks, new_axes, do_integrity_check=False)
bm._consolidate_inplace()
return bm
@@ -3796,7 +3792,7 @@ def xs(self, key, axis=1, copy=True, takeable=False):
# we must copy here as we are mixed type
for blk in self.blocks:
newb = make_block(values=blk.values[slicer],
- klass=blk.__class__, fastpath=True,
+ klass=blk.__class__,
placement=blk.mgr_locs)
new_blocks.append(newb)
elif len(self.blocks) == 1:
@@ -3806,8 +3802,7 @@ def xs(self, key, axis=1, copy=True, takeable=False):
vals = vals.copy()
new_blocks = [make_block(values=vals,
placement=block.mgr_locs,
- klass=block.__class__,
- fastpath=True, )]
+ klass=block.__class__)]
return self.__class__(new_blocks, new_axes)
@@ -3910,7 +3905,7 @@ def iget(self, i, fastpath=True):
return SingleBlockManager(
[block.make_block_same_class(values,
placement=slice(0, len(values)),
- ndim=1, fastpath=True)],
+ ndim=1)],
self.axes[1])
def get_scalar(self, tup):
@@ -4432,8 +4427,7 @@ def __init__(self, block, axis, do_integrity_check=False, fastpath=False):
block = block[0]
if not isinstance(block, Block):
- block = make_block(block, placement=slice(0, len(axis)), ndim=1,
- fastpath=True)
+ block = make_block(block, placement=slice(0, len(axis)), ndim=1)
self.blocks = [block]
@@ -4725,7 +4719,6 @@ def form_blocks(arrays, names, axes):
if len(items_dict['DatetimeTZBlock']):
dttz_blocks = [make_block(array,
klass=DatetimeTZBlock,
- fastpath=True,
placement=[i])
for i, _, array in items_dict['DatetimeTZBlock']]
blocks.extend(dttz_blocks)
@@ -4743,8 +4736,7 @@ def form_blocks(arrays, names, axes):
blocks.extend(sparse_blocks)
if len(items_dict['CategoricalBlock']) > 0:
- cat_blocks = [make_block(array, klass=CategoricalBlock, fastpath=True,
- placement=[i])
+ cat_blocks = [make_block(array, klass=CategoricalBlock, placement=[i])
for i, _, array in items_dict['CategoricalBlock']]
blocks.extend(cat_blocks)
@@ -4800,8 +4792,7 @@ def _sparse_blockify(tuples, dtype=None):
new_blocks = []
for i, names, array in tuples:
array = _maybe_to_sparse(array)
- block = make_block(array, klass=SparseBlock, fastpath=True,
- placement=[i])
+ block = make_block(array, klass=SparseBlock, placement=[i])
new_blocks.append(block)
return new_blocks
@@ -4885,7 +4876,7 @@ def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
- return make_block(new_values, fastpath=True, placement=new_mgr_locs)
+ return make_block(new_values, placement=new_mgr_locs)
# no merge
return blocks
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 623d2d39607c2..b1f89829c95a5 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -1254,3 +1254,11 @@ def test_binop_other(self, op, value, dtype):
result = op(s, e).dtypes
expected = op(s, value).dtypes
assert_series_equal(result, expected)
+
+
+def test_deprecated_fastpath():
+ # GH#19265
+ values = np.random.rand(3, 3)
+ with tm.assert_produces_warning(DeprecationWarning,
+ check_stacklevel=False):
+ make_block(values, placement=np.arange(3), fastpath=True)
| The `fastpath` kwarg in `Block.__init__` must be vestigial or something. It isn't used anywhere in the blocks themselves. This PR removes the unused kwarg. | https://api.github.com/repos/pandas-dev/pandas/pulls/19265 | 2018-01-16T06:30:52Z | 2018-01-19T21:50:33Z | 2018-01-19T21:50:33Z | 2018-12-07T22:36:12Z |
CLN: put mgr_locs setter next to property definition | diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 3c923133477df..43fdd454250a5 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -191,6 +191,13 @@ def fill_value(self):
def mgr_locs(self):
return self._mgr_locs
+ @mgr_locs.setter
+ def mgr_locs(self, new_mgr_locs):
+ if not isinstance(new_mgr_locs, BlockPlacement):
+ new_mgr_locs = BlockPlacement(new_mgr_locs)
+
+ self._mgr_locs = new_mgr_locs
+
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an
@@ -224,13 +231,6 @@ def make_block_same_class(self, values, placement=None, fastpath=True,
return make_block(values, placement=placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
- @mgr_locs.setter
- def mgr_locs(self, new_mgr_locs):
- if not isinstance(new_mgr_locs, BlockPlacement):
- new_mgr_locs = BlockPlacement(new_mgr_locs)
-
- self._mgr_locs = new_mgr_locs
-
def __unicode__(self):
# don't want to print out all of the items here
@@ -840,7 +840,6 @@ def setitem(self, indexer, value, mgr=None):
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
- l = len(values)
# length checking
# boolean with truth values == len of the value is ok too
@@ -855,7 +854,7 @@ def setitem(self, indexer, value, mgr=None):
# slice
elif isinstance(indexer, slice):
- if is_list_like(value) and l:
+ if is_list_like(value) and len(values):
if len(value) != length_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19264 | 2018-01-16T04:42:29Z | 2018-01-17T00:15:31Z | 2018-01-17T00:15:31Z | 2018-02-11T21:59:07Z |
BUG: Patch handling of keep_default_na=False | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 2f29e390c0ba1..ae04996b4fddf 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -214,8 +214,20 @@ na_values : scalar, str, list-like, or dict, default ``None``
for a list of the values interpreted as NaN by default.
keep_default_na : boolean, default ``True``
- If na_values are specified and keep_default_na is ``False`` the default NaN
- values are overridden, otherwise they're appended to.
+ Whether or not to include the default NaN values when parsing the data.
+ Depending on whether `na_values` is passed in, the behavior is as follows:
+
+ * If `keep_default_na` is True, and `na_values` are specified, `na_values`
+ is appended to the default NaN values used for parsing.
+ * If `keep_default_na` is True, and `na_values` are not specified, only
+ the default NaN values are used for parsing.
+ * If `keep_default_na` is False, and `na_values` are specified, only
+ the NaN values specified `na_values` are used for parsing.
+ * If `keep_default_na` is False, and `na_values` are not specified, no
+ strings will be parsed as NaN.
+
+ Note that if `na_filter` is passed in as False, the `keep_default_na` and
+ `na_values` parameters will be ignored.
na_filter : boolean, default ``True``
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing ``na_filter=False`` can improve the performance
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 853d5cee11cd1..326673a54acfa 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -463,6 +463,7 @@ I/O
- :func:`read_html` now rewinds seekable IO objects after parse failure, before attempting to parse with a new parser. If a parser errors and the object is non-seekable, an informative error is raised suggesting the use of a different parser (:issue:`17975`)
- Bug in :func:`read_msgpack` with a non existent file is passed in Python 2 (:issue:`15296`)
- Bug in :func:`read_csv` where a ``MultiIndex`` with duplicate columns was not being mangled appropriately (:issue:`18062`)
+- Bug in :func:`read_csv` where missing values were not being handled properly when ``keep_default_na=False`` with dictionary ``na_values`` (:issue:`19227`)
- Bug in :func:`read_sas` where a file with 0 variables gave an ``AttributeError`` incorrectly. Now it gives an ``EmptyDataError`` (:issue:`18184`)
- Bug in :func:`DataFrame.to_latex()` where pairs of braces meant to serve as invisible placeholders were escaped (:issue:`18667`)
- Bug in :func:`read_json` where large numeric values were causing an ``OverflowError`` (:issue:`18842`)
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index cf63b5083885e..5efe2147f6f8e 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -288,7 +288,7 @@ cdef class TextReader:
object file_handle, na_fvalues
object true_values, false_values
object handle
- bint na_filter, verbose, has_usecols, has_mi_columns
+ bint na_filter, keep_default_na, verbose, has_usecols, has_mi_columns
int64_t parser_start
list clocks
char *c_encoding
@@ -352,6 +352,8 @@ cdef class TextReader:
na_filter=True,
na_values=None,
na_fvalues=None,
+ keep_default_na=True,
+
true_values=None,
false_values=None,
allow_leading_cols=True,
@@ -378,8 +380,8 @@ cdef class TextReader:
self.parser = parser_new()
self.parser.chunksize = tokenize_chunksize
- self.mangle_dupe_cols=mangle_dupe_cols
- self.tupleize_cols=tupleize_cols
+ self.mangle_dupe_cols = mangle_dupe_cols
+ self.tupleize_cols = tupleize_cols
# For timekeeping
self.clocks = []
@@ -477,6 +479,7 @@ cdef class TextReader:
self.true_set = kset_from_list(self.true_values)
self.false_set = kset_from_list(self.false_values)
+ self.keep_default_na = keep_default_na
self.converters = converters
self.na_filter = na_filter
@@ -1299,7 +1302,10 @@ cdef class TextReader:
elif i in self.na_values:
key = i
else: # No na_values provided for this column.
- return _NA_VALUES, set()
+ if self.keep_default_na:
+ return _NA_VALUES, set()
+
+ return list(), set()
values = self.na_values[key]
if values is not None and not isinstance(values, list):
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 150fccde81a60..1ba687541eecf 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -149,8 +149,20 @@
NaN: '""" + fill("', '".join(sorted(_NA_VALUES)),
70, subsequent_indent=" ") + """'.
keep_default_na : bool, default True
- If na_values are specified and keep_default_na is False the default NaN
- values are overridden, otherwise they're appended to.
+ Whether or not to include the default NaN values when parsing the data.
+ Depending on whether `na_values` is passed in, the behavior is as follows:
+
+ * If `keep_default_na` is True, and `na_values` are specified, `na_values`
+ is appended to the default NaN values used for parsing.
+ * If `keep_default_na` is True, and `na_values` are not specified, only
+ the default NaN values are used for parsing.
+ * If `keep_default_na` is False, and `na_values` are specified, only
+ the NaN values specified `na_values` are used for parsing.
+ * If `keep_default_na` is False, and `na_values` are not specified, no
+ strings will be parsed as NaN.
+
+ Note that if `na_filter` is passed in as False, the `keep_default_na` and
+ `na_values` parameters will be ignored.
na_filter : boolean, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
@@ -910,9 +922,6 @@ def _clean_options(self, options, engine):
na_values = options['na_values']
skiprows = options['skiprows']
- # really delete this one
- keep_default_na = result.pop('keep_default_na')
-
_validate_header_arg(options['header'])
depr_warning = ''
@@ -957,6 +966,7 @@ def _clean_options(self, options, engine):
converters = {}
# Converting values to NA
+ keep_default_na = options['keep_default_na']
na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
# handle skiprows; this is internally handled by the
@@ -1225,6 +1235,7 @@ def __init__(self, kwds):
self.na_values = kwds.get('na_values')
self.na_fvalues = kwds.get('na_fvalues')
self.na_filter = kwds.get('na_filter', False)
+ self.keep_default_na = kwds.get('keep_default_na', True)
self.true_values = kwds.get('true_values')
self.false_values = kwds.get('false_values')
@@ -1487,7 +1498,8 @@ def _agg_index(self, index, try_parse_dates=True):
col_name = self.index_names[i]
if col_name is not None:
col_na_values, col_na_fvalues = _get_na_values(
- col_name, self.na_values, self.na_fvalues)
+ col_name, self.na_values, self.na_fvalues,
+ self.keep_default_na)
arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
arrays.append(arr)
@@ -1510,7 +1522,7 @@ def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
if self.na_filter:
col_na_values, col_na_fvalues = _get_na_values(
- c, na_values, na_fvalues)
+ c, na_values, na_fvalues, self.keep_default_na)
else:
col_na_values, col_na_fvalues = set(), set()
@@ -3097,16 +3109,23 @@ def _clean_na_values(na_values, keep_default_na=True):
na_values = set()
na_fvalues = set()
elif isinstance(na_values, dict):
- na_values = na_values.copy() # Prevent aliasing.
- if keep_default_na:
- for k, v in compat.iteritems(na_values):
- if not is_list_like(v):
- v = [v]
+ old_na_values = na_values.copy()
+ na_values = {} # Prevent aliasing.
+
+ # Convert the values in the na_values dictionary
+ # into array-likes for further use. This is also
+ # where we append the default NaN values, provided
+ # that `keep_default_na=True`.
+ for k, v in compat.iteritems(old_na_values):
+ if not is_list_like(v):
+ v = [v]
+
+ if keep_default_na:
v = set(v) | _NA_VALUES
- na_values[k] = v
- na_fvalues = dict(
- (k, _floatify_na_values(v)) for k, v in na_values.items() # noqa
- )
+
+ na_values[k] = v
+ na_fvalues = dict((k, _floatify_na_values(v))
+ for k, v in na_values.items())
else:
if not is_list_like(na_values):
na_values = [na_values]
@@ -3225,12 +3244,38 @@ def _stringify_na_values(na_values):
return set(result)
-def _get_na_values(col, na_values, na_fvalues):
+def _get_na_values(col, na_values, na_fvalues, keep_default_na):
+ """
+ Get the NaN values for a given column.
+
+ Parameters
+ ----------
+ col : str
+ The name of the column.
+ na_values : array-like, dict
+ The object listing the NaN values as strings.
+ na_fvalues : array-like, dict
+ The object listing the NaN values as floats.
+ keep_default_na : bool
+ If `na_values` is a dict, and the column is not mapped in the
+ dictionary, whether to return the default NaN values or the empty set.
+
+ Returns
+ -------
+ nan_tuple : A length-two tuple composed of
+
+ 1) na_values : the string NaN values for that column.
+ 2) na_fvalues : the float NaN values for that column.
+ """
+
if isinstance(na_values, dict):
if col in na_values:
return na_values[col], na_fvalues[col]
else:
- return _NA_VALUES, set()
+ if keep_default_na:
+ return _NA_VALUES, set()
+
+ return set(), set()
else:
return na_values, na_fvalues
diff --git a/pandas/tests/io/parser/na_values.py b/pandas/tests/io/parser/na_values.py
index f8906d5a1f7ba..d2c3f82e95c4d 100644
--- a/pandas/tests/io/parser/na_values.py
+++ b/pandas/tests/io/parser/na_values.py
@@ -224,6 +224,45 @@ def test_na_values_keep_default(self):
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
+ def test_no_keep_default_na_dict_na_values(self):
+ # see gh-19227
+ data = "a,b\n,2"
+
+ df = self.read_csv(StringIO(data), na_values={"b": ["2"]},
+ keep_default_na=False)
+ expected = DataFrame({"a": [""], "b": [np.nan]})
+ tm.assert_frame_equal(df, expected)
+
+ # Scalar values shouldn't cause the parsing to crash or fail.
+ data = "a,b\n1,2"
+
+ df = self.read_csv(StringIO(data), na_values={"b": 2},
+ keep_default_na=False)
+ expected = DataFrame({"a": [1], "b": [np.nan]})
+ tm.assert_frame_equal(df, expected)
+
+ data = """\
+113125,"blah","/blaha",kjsdkj,412.166,225.874,214.008
+729639,"qwer","",asdfkj,466.681,,252.373
+"""
+ expected = DataFrame({0: [np.nan, 729639.0],
+ 1: [np.nan, "qwer"],
+ 2: ["/blaha", np.nan],
+ 3: ["kjsdkj", "asdfkj"],
+ 4: [412.166, 466.681],
+ 5: ["225.874", ""],
+ 6: [np.nan, 252.373]})
+
+ df = self.read_csv(StringIO(data), header=None, keep_default_na=False,
+ na_values={2: "", 6: "214.008",
+ 1: "blah", 0: 113125})
+ tm.assert_frame_equal(df, expected)
+
+ df = self.read_csv(StringIO(data), header=None, keep_default_na=False,
+ na_values={2: "", 6: "214.008",
+ 1: "blah", 0: "113125"})
+ tm.assert_frame_equal(df, expected)
+
def test_na_values_na_filter_override(self):
data = """\
A,B
| Patches very buggy behavior of `keep_default_na=False` whenever `na_values` is a dict
* Respect `keep_default_na` for column that doesn't exist in `na_values` dictionary
* Don't crash / break when `na_value` is a scalar in the `na_values` dictionary.
In addition, clarifies documentation on the handling of the keep `keep_default_na` parameter with respect to `na_filter` and `na_values`.
Closes #19227.
cc @neilser | https://api.github.com/repos/pandas-dev/pandas/pulls/19260 | 2018-01-16T00:22:46Z | 2018-01-18T00:23:17Z | 2018-01-18T00:23:16Z | 2018-01-18T04:21:37Z |
DOC: corrects Expanding min_periods default in docstring | diff --git a/pandas/core/window.py b/pandas/core/window.py
index 76ba76b7a9da9..5d2fa16876c11 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -1286,7 +1286,7 @@ class Expanding(_Rolling_and_Expanding):
Parameters
----------
- min_periods : int, default None
+ min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : boolean, default False
| Currently the doc string says the ``min_periods`` default is ``None``, while it in the code actually is 1.
This PR fixes this. | https://api.github.com/repos/pandas-dev/pandas/pulls/19259 | 2018-01-15T23:55:43Z | 2018-01-16T00:13:43Z | 2018-01-16T00:13:43Z | 2018-01-17T10:11:49Z |
PERF: remove use of Panel & perf in rolling corr/cov | diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py
index 59cf7d090a622..75990d83f8212 100644
--- a/asv_bench/benchmarks/rolling.py
+++ b/asv_bench/benchmarks/rolling.py
@@ -11,8 +11,8 @@ class Methods(object):
[10, 1000],
['int', 'float'],
['median', 'mean', 'max', 'min', 'std', 'count', 'skew', 'kurt',
- 'sum', 'corr', 'cov'])
- param_names = ['constructor', 'window', 'dtype', 'method']
+ 'sum'])
+ param_names = ['contructor', 'window', 'dtype', 'method']
def setup(self, constructor, window, dtype, method):
N = 10**5
@@ -23,6 +23,27 @@ def time_rolling(self, constructor, window, dtype, method):
getattr(self.roll, method)()
+class Pairwise(object):
+
+ sample_time = 0.2
+ params = ([10, 1000, None],
+ ['corr', 'cov'],
+ [True, False])
+ param_names = ['window', 'method', 'pairwise']
+
+ def setup(self, window, method, pairwise):
+ N = 10**4
+ arr = np.random.random(N)
+ self.df = pd.DataFrame(arr)
+
+ def time_pairwise(self, window, method, pairwise):
+ if window is None:
+ r = self.df.expanding()
+ else:
+ r = self.df.rolling(window=window)
+ getattr(r, method)(self.df, pairwise=pairwise)
+
+
class Quantile(object):
sample_time = 0.2
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 2bd2bb199bf1f..5db29cb76b106 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -383,7 +383,7 @@ Performance Improvements
- :func:`Series` / :func:`DataFrame` tab completion limits to 100 values, for better performance. (:issue:`18587`)
- Improved performance of :func:`DataFrame.median` with ``axis=1`` when bottleneck is not installed (:issue:`16468`)
- Improved performance of :func:`MultiIndex.get_loc` for large indexes, at the cost of a reduction in performance for small ones (:issue:`18519`)
-
+- Improved performance of pairwise ``.rolling()`` and ``.expanding()`` with ``.cov()`` and ``.corr()`` operations (:issue:`17917`)
.. _whatsnew_0230.docs:
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index 0e92fc4edce85..a4c9848dca900 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -99,19 +99,15 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
if not dropna:
from pandas import MultiIndex
- try:
+ if table.index.nlevels > 1:
m = MultiIndex.from_arrays(cartesian_product(table.index.levels),
names=table.index.names)
table = table.reindex(m, axis=0)
- except AttributeError:
- pass # it's a single level
- try:
+ if table.columns.nlevels > 1:
m = MultiIndex.from_arrays(cartesian_product(table.columns.levels),
names=table.columns.names)
table = table.reindex(m, axis=1)
- except AttributeError:
- pass # it's a single level or a series
if isinstance(table, ABCDataFrame):
table = table.sort_index(axis=1)
diff --git a/pandas/core/window.py b/pandas/core/window.py
index 4d6a1de60f59b..a3f19ef50459d 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -1863,25 +1863,38 @@ def dataframe_from_int_dict(data, frame_template):
results[i][j] = f(*_prep_binary(arg1.iloc[:, i],
arg2.iloc[:, j]))
- # TODO: not the most efficient (perf-wise)
- # though not bad code-wise
- from pandas import Panel, MultiIndex, concat
-
- with warnings.catch_warnings(record=True):
- p = Panel.from_dict(results).swapaxes('items', 'major')
- if len(p.major_axis) > 0:
- p.major_axis = arg1.columns[p.major_axis]
- if len(p.minor_axis) > 0:
- p.minor_axis = arg2.columns[p.minor_axis]
-
- if len(p.items):
+ from pandas import MultiIndex, concat
+
+ result_index = arg1.index.union(arg2.index)
+ if len(result_index):
+
+ # construct result frame
result = concat(
- [p.iloc[i].T for i in range(len(p.items))],
- keys=p.items)
+ [concat([results[i][j]
+ for j, c in enumerate(arg2.columns)],
+ ignore_index=True)
+ for i, c in enumerate(arg1.columns)],
+ ignore_index=True,
+ axis=1)
+ result.columns = arg1.columns
+
+ # set the index and reorder
+ if arg2.columns.nlevels > 1:
+ result.index = MultiIndex.from_product(
+ arg2.columns.levels + [result_index])
+ result = result.reorder_levels([2, 0, 1]).sort_index()
+ else:
+ result.index = MultiIndex.from_product(
+ [range(len(arg2.columns)),
+ range(len(result_index))])
+ result = result.swaplevel(1, 0).sort_index()
+ result.index = MultiIndex.from_product(
+ [result_index] + [arg2.columns])
else:
+ # empty result
result = DataFrame(
- index=MultiIndex(levels=[arg1.index, arg1.columns],
+ index=MultiIndex(levels=[arg1.index, arg2.columns],
labels=[[], []]),
columns=arg2.columns,
dtype='float64')
@@ -1890,9 +1903,9 @@ def dataframe_from_int_dict(data, frame_template):
# reset our column names to arg2 names
# careful not to mutate the original names
result.columns = result.columns.set_names(
- arg2.columns.names)
+ arg1.columns.names)
result.index = result.index.set_names(
- arg1.index.names + arg1.columns.names)
+ result_index.names + arg2.columns.names)
return result
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index 22526d14a7168..dabdb1e8e689c 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -14,6 +14,7 @@
import pandas.tseries.offsets as offsets
from pandas.core.base import SpecificationError
from pandas.errors import UnsupportedFunctionCall
+from pandas.core.sorting import safe_sort
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.compat import range, zip
@@ -1645,7 +1646,7 @@ def compare(self, result, expected):
result = result.dropna().values
expected = expected.dropna().values
- tm.assert_numpy_array_equal(result, expected)
+ tm.assert_numpy_array_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize('f', [lambda x: x.cov(), lambda x: x.corr()])
def test_no_flex(self, f):
@@ -1670,15 +1671,19 @@ def test_no_flex(self, f):
def test_pairwise_with_self(self, f):
# DataFrame with itself, pairwise=True
- results = [f(df) for df in self.df1s]
- for (df, result) in zip(self.df1s, results):
+ # note that we may construct the 1st level of the MI
+ # in a non-motononic way, so compare accordingly
+ results = []
+ for i, df in enumerate(self.df1s):
+ result = f(df)
tm.assert_index_equal(result.index.levels[0],
df.index,
check_names=False)
- tm.assert_index_equal(result.index.levels[1],
- df.columns,
- check_names=False)
+ tm.assert_numpy_array_equal(safe_sort(result.index.levels[1]),
+ safe_sort(df.columns.unique()))
tm.assert_index_equal(result.columns, df.columns)
+ results.append(df)
+
for i, result in enumerate(results):
if i > 0:
self.compare(result, results[0])
@@ -1716,9 +1721,8 @@ def test_pairwise_with_other(self, f):
tm.assert_index_equal(result.index.levels[0],
df.index,
check_names=False)
- tm.assert_index_equal(result.index.levels[1],
- self.df2.columns,
- check_names=False)
+ tm.assert_numpy_array_equal(safe_sort(result.index.levels[1]),
+ safe_sort(self.df2.columns.unique()))
for i, result in enumerate(results):
if i > 0:
self.compare(result, results[0])
| closes #17917
```
before after ratio
[aa9e0024] [872fe711]
- 1.70s 16.02ms 0.01 rolling.Pairwise.time_pairwise(None, 'corr', True)
- 1.84s 17.12ms 0.01 rolling.Pairwise.time_pairwise(10, 'corr', True)
- 1.84s 16.98ms 0.01 rolling.Pairwise.time_pairwise(1000, 'corr', True)
- 1.74s 15.59ms 0.01 rolling.Pairwise.time_pairwise(None, 'cov', True)
- 1.77s 15.63ms 0.01 rolling.Pairwise.time_pairwise(1000, 'cov', True)
- 1.83s 14.62ms 0.01 rolling.Pairwise.time_pairwise(10, 'cov', True)
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/19257 | 2018-01-15T23:37:15Z | 2018-02-01T12:45:16Z | 2018-02-01T12:45:16Z | 2018-02-01T12:45:16Z |
Fix Index mul/div ops with Series, closes #19080, #19042 | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 744f1e14533e7..7c54d44d9f25b 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -423,6 +423,9 @@ Conversion
- Bug in :class:`WeekOfMonth` and :class:`LastWeekOfMonth` where default keyword arguments for constructor raised ``ValueError`` (:issue:`19142`)
- Bug in localization of a naive, datetime string in a ``Series`` constructor with a ``datetime64[ns, tz]`` dtype (:issue:`174151`)
- :func:`Timestamp.replace` will now handle Daylight Savings transitions gracefully (:issue:`18319`)
+- Bug in :class:`Index` multiplication and division methods where operating with a ``Series`` would return an ``Index`` object instead of a ``Series`` object (:issue:`19042`)
+
+
-
-
- Bug in ``.astype()`` to non-ns timedelta units would hold the incorrect dtype (:issue:`19176`, :issue:`19223`, :issue:`12425`)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index a5949c62ad913..d5330768059fd 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -14,7 +14,7 @@
from pandas.core.accessor import CachedAccessor
from pandas.core.dtypes.generic import (
- ABCSeries,
+ ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex,
ABCDateOffset)
@@ -4019,6 +4019,9 @@ def _add_numeric_methods_binary(cls):
def _make_evaluate_binop(op, opstr, reversed=False, constructor=Index):
def _evaluate_numeric_binop(self, other):
+ if isinstance(other, (ABCSeries, ABCDataFrame)):
+ return NotImplemented
+
other = self._validate_for_numeric_binop(other, op, opstr)
# handle time-based others
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 741dca6be0630..10a923c056be2 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -8,6 +8,7 @@
is_integer,
is_scalar,
is_int64_dtype)
+from pandas.core.dtypes.generic import ABCSeries
from pandas import compat
from pandas.compat import lrange, range, get_range_parameters
@@ -583,6 +584,8 @@ def _make_evaluate_binop(op, opstr, reversed=False, step=False):
"""
def _evaluate_numeric_binop(self, other):
+ if isinstance(other, ABCSeries):
+ return NotImplemented
other = self._validate_for_numeric_binop(other, op, opstr)
attrs = self._get_attributes_dict()
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index dcd592345b91c..1ce8ade50c071 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -68,12 +68,12 @@ def test_numeric_compat(self):
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5, dtype=arr_dtype))
- tm.assert_index_equal(result, didx)
+ tm.assert_series_equal(result, Series(didx))
- result = idx * Series(np.arange(5, dtype='float64') + 0.1)
- expected = Float64Index(np.arange(5, dtype='float64') *
- (np.arange(5, dtype='float64') + 0.1))
- tm.assert_index_equal(result, expected)
+ rng5 = np.arange(5, dtype='float64')
+ result = idx * Series(rng5 + 0.1)
+ expected = Series(rng5 * (rng5 + 0.1))
+ tm.assert_series_equal(result, expected)
# invalid
pytest.raises(TypeError,
@@ -95,16 +95,6 @@ def test_numeric_compat(self):
for r, e in zip(result, expected):
tm.assert_index_equal(r, e)
- result = divmod(idx, Series(full_like(idx.values, 2)))
- with np.errstate(all='ignore'):
- div, mod = divmod(
- idx.values,
- full_like(idx.values, 2),
- )
- expected = Index(div), Index(mod)
- for r, e in zip(result, expected):
- tm.assert_index_equal(r, e)
-
# test power calculations both ways, GH 14973
expected = pd.Float64Index(2.0**idx.values)
result = 2.0**idx
@@ -114,6 +104,18 @@ def test_numeric_compat(self):
result = idx**2.0
tm.assert_index_equal(result, expected)
+ @pytest.mark.xfail(reason='GH#19252 Series has no __rdivmod__')
+ def test_divmod_series(self):
+ idx = self.create_index()
+
+ result = divmod(idx, Series(full_like(idx.values, 2)))
+ with np.errstate(all='ignore'):
+ div, mod = divmod(idx.values, full_like(idx.values, 2))
+ expected = Series(div), Series(mod)
+
+ for r, e in zip(result, expected):
+ tm.assert_series_equal(r, e)
+
def test_explicit_conversions(self):
# GH 8608
diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py
index 3ec918e391860..962de91ed0581 100644
--- a/pandas/tests/indexes/timedeltas/test_arithmetic.py
+++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py
@@ -152,11 +152,12 @@ def test_numeric_compat(self):
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5, dtype='int64'))
- tm.assert_index_equal(result, didx)
+ tm.assert_series_equal(result, Series(didx))
- result = idx * Series(np.arange(5, dtype='float64') + 0.1)
- tm.assert_index_equal(result, self._holder(np.arange(
- 5, dtype='float64') * (np.arange(5, dtype='float64') + 0.1)))
+ rng5 = np.arange(5, dtype='float64')
+ result = idx * Series(rng5 + 0.1)
+ tm.assert_series_equal(result,
+ Series(self._holder(rng5 * (rng5 + 0.1))))
# invalid
pytest.raises(TypeError, lambda: idx * idx)
| - [x] closes #19080, closes #19042
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19253 | 2018-01-15T19:36:22Z | 2018-01-16T23:59:29Z | 2018-01-16T23:59:28Z | 2018-01-17T03:02:30Z |
Fix pd.NaT - Series | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 314a5a3f37311..672ef8be1f72a 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -416,10 +416,10 @@ Conversion
- Bug in :class:`WeekOfMonth` and :class:`LastWeekOfMonth` where default keyword arguments for constructor raised ``ValueError`` (:issue:`19142`)
- Bug in localization of a naive, datetime string in a ``Series`` constructor with a ``datetime64[ns, tz]`` dtype (:issue:`174151`)
- :func:`Timestamp.replace` will now handle Daylight Savings transitions gracefully (:issue:`18319`)
-
-
-
+-
+-
- Bug in ``.astype()`` to non-ns timedelta units would hold the incorrect dtype (:issue:`19176`, :issue:`19223`, :issue:`12425`)
+- Bug in subtracting :class:`Series` from ``NaT`` incorrectly returning ``NaT`` (:issue:`19158`)
Indexing
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 683be4c9aa3a8..39f9437f0cecf 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -156,7 +156,7 @@ cdef class _NaT(datetime):
neg_other = -other
return self + neg_other
- elif getattr(other, '_typ', None) in ['period',
+ elif getattr(other, '_typ', None) in ['period', 'series',
'periodindex', 'dateoffset']:
return NotImplemented
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index 73e8c783ba882..a6b217a37bd0c 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -314,8 +314,7 @@ def test_nat_arithmetic_index():
@pytest.mark.parametrize('box, assert_func', [
(TimedeltaIndex, tm.assert_index_equal),
- pytest.param(Series, tm.assert_series_equal,
- marks=pytest.mark.xfail(reason='NaT - Series returns NaT'))
+ (Series, tm.assert_series_equal)
])
def test_nat_arithmetic_td64_vector(box, assert_func):
# GH#19124
| Tests will be added after #19139 is merged, since largely that will involve removing an "xfail" from tests that already exist there.
- [x] closes #19158
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19251 | 2018-01-15T19:02:06Z | 2018-01-16T11:17:41Z | 2018-01-16T11:17:41Z | 2018-02-11T21:59:11Z |
CLN: Remove unused core.internals methods | diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 2482148af9308..5c3481ed6d4ff 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -303,10 +303,6 @@ def getitem_block(self, slicer, new_mgr_locs=None):
def shape(self):
return self.values.shape
- @property
- def itemsize(self):
- return self.values.itemsize
-
@property
def dtype(self):
return self.values.dtype
@@ -327,21 +323,6 @@ def concat_same_type(self, to_concat, placement=None):
return self.make_block_same_class(
values, placement=placement or slice(0, len(values), 1))
- def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
- limit=None, mask_info=None):
- """
- Reindex using pre-computed indexer information
- """
- if axis < 1:
- raise AssertionError(
- 'axis must be at least 1, got {axis}'.format(axis=axis))
- if fill_value is None:
- fill_value = self.fill_value
-
- new_values = algos.take_nd(self.values, indexer, axis,
- fill_value=fill_value, mask_info=mask_info)
- return self.make_block(new_values)
-
def iget(self, i):
return self.values[i]
@@ -936,11 +917,8 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0,
new_values = self.values if inplace else self.values.copy()
- if hasattr(new, 'reindex_axis'):
- new = new.values
-
- if hasattr(mask, 'reindex_axis'):
- mask = mask.values
+ new = getattr(new, 'values', new)
+ mask = getattr(mask, 'values', mask)
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isna(new) and not self.is_object:
@@ -1297,8 +1275,7 @@ def eval(self, func, other, errors='raise', try_cast=False, mgr=None):
orig_other = other
values = self.values
- if hasattr(other, 'reindex_axis'):
- other = other.values
+ other = getattr(other, 'values', other)
# make sure that we can broadcast
is_transposed = False
@@ -1446,11 +1423,8 @@ def where(self, other, cond, align=True, errors='raise',
if transpose:
values = values.T
- if hasattr(other, 'reindex_axis'):
- other = other.values
-
- if hasattr(cond, 'reindex_axis'):
- cond = cond.values
+ other = getattr(other, 'values', other)
+ cond = getattr(cond, 'values', cond)
# If the default broadcasting would go in the wrong direction, then
# explicitly reshape other instead
@@ -2630,9 +2604,8 @@ def external_values(self):
def get_values(self, dtype=None):
# return object dtype as Timestamps with the zones
if is_object_dtype(dtype):
- f = lambda x: lib.Timestamp(x, tz=self.values.tz)
return lib.map_infer(
- self.values.ravel(), f).reshape(self.values.shape)
+ self.values.ravel(), self._box_func).reshape(self.values.shape)
return self.values
def _slice(self, slicer):
@@ -2760,10 +2733,6 @@ class SparseBlock(NonConsolidatableMixIn, Block):
def shape(self):
return (len(self.mgr_locs), self.sp_index.length)
- @property
- def itemsize(self):
- return self.dtype.itemsize
-
@property
def fill_value(self):
# return np.nan
@@ -2887,22 +2856,6 @@ def shift(self, periods, axis=0, mgr=None):
return [self.make_block_same_class(new_values,
placement=self.mgr_locs)]
- def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
- limit=None, mask_info=None):
- """
- Reindex using pre-computed indexer information
- """
- if axis < 1:
- raise AssertionError(
- 'axis must be at least 1, got {axis}'.format(axis=axis))
-
- # taking on the 0th axis always here
- if fill_value is None:
- fill_value = self.fill_value
- return self.make_block_same_class(self.values.take(indexer),
- fill_value=fill_value,
- placement=self.mgr_locs)
-
def sparse_reindex(self, new_index):
""" sparse reindex and return a new block
current reindex only works for float64 dtype! """
@@ -3324,7 +3277,7 @@ def apply(self, f, axes=None, filter=None, do_integrity_check=False,
aligned_args = dict((k, kwargs[k])
for k in align_keys
- if hasattr(kwargs[k], 'reindex_axis'))
+ if hasattr(kwargs[k], 'values'))
for b in self.blocks:
if filter is not None:
@@ -4552,10 +4505,6 @@ def asobject(self):
"""
return self._block.get_values(dtype=object)
- @property
- def itemsize(self):
- return self._block.values.itemsize
-
@property
def _can_hold_na(self):
return self._block._can_hold_na
| De-duplicate some bits of DatetimeBlock and DatetimeTZBlock.
- [x] closes #19243
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19250 | 2018-01-15T18:48:20Z | 2018-01-21T19:30:14Z | 2018-01-21T19:30:14Z | 2018-01-23T04:40:18Z |
DEPR: change Panel DeprecationWarning -> FutureWarning | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 14949267fc37d..402b85ceb681a 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -225,6 +225,34 @@ If installed, we now require:
| openpyxl | 2.4.0 | |
+-----------------+-----------------+----------+
+.. _whatsnew_0230.api_breaking.deprecate_panel:
+
+Deprecate Panel
+^^^^^^^^^^^^^^^
+
+``Panel`` was deprecated in the 0.20.x release, showing as a ``DeprecationWarning``. Using ``Panel`` will now show a ``FutureWarning``. The recommended way to represent 3-D data are
+with a ``MultiIndex`` on a ``DataFrame`` via the :meth:`~Panel.to_frame` or with the `xarray package <http://xarray.pydata.org/en/stable/>`__. Pandas
+provides a :meth:`~Panel.to_xarray` method to automate this conversion. For more details see :ref:`Deprecate Panel <dsintro.deprecate_panel>` documentation. (:issue:`13563`, :issue:`18324`).
+
+.. ipython:: python
+ :okwarning:
+
+ p = tm.makePanel()
+ p
+
+Convert to a MultiIndex DataFrame
+
+.. ipython:: python
+
+ p.to_frame()
+
+Convert to an xarray DataArray
+
+.. ipython:: python
+ :okwarning:
+
+ p.to_xarray()
+
Build Changes
^^^^^^^^^^^^^
@@ -290,6 +318,7 @@ Deprecations
- :func:`read_excel` has deprecated the ``skip_footer`` parameter. Use ``skipfooter`` instead (:issue:`18836`)
- The ``is_copy`` attribute is deprecated and will be removed in a future version (:issue:`18801`).
+
.. _whatsnew_0230.prior_deprecations:
Removal of prior version deprecations/changes
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 26e7c192ad0af..1df69576e6ff2 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -151,7 +151,7 @@ def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
"http://xarray.pydata.org/en/stable/.\n"
"Pandas provides a `.to_xarray()` method to help "
"automate this conversion.\n",
- DeprecationWarning, stacklevel=3)
+ FutureWarning, stacklevel=3)
self._init_data(data=data, items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=copy, dtype=dtype)
diff --git a/pandas/tests/generic/test_label_or_level_utils.py b/pandas/tests/generic/test_label_or_level_utils.py
index 1ad1b06aaefa2..8b133e654a869 100644
--- a/pandas/tests/generic/test_label_or_level_utils.py
+++ b/pandas/tests/generic/test_label_or_level_utils.py
@@ -46,7 +46,7 @@ def df_duplabels(df):
@pytest.fixture
def panel():
- with tm.assert_produces_warning(DeprecationWarning,
+ with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
return pd.Panel()
| closes #18324
| https://api.github.com/repos/pandas-dev/pandas/pulls/19247 | 2018-01-15T13:39:04Z | 2018-01-16T00:14:37Z | 2018-01-16T00:14:37Z | 2018-01-16T00:15:01Z |
BUG: unsupported type Interval when writing dataframe to excel | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 246eab386b2ab..52d8cf5f0a66d 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -489,6 +489,8 @@ I/O
- Bug in :func:`DataFrame.to_latex()` where pairs of braces meant to serve as invisible placeholders were escaped (:issue:`18667`)
- Bug in :func:`read_json` where large numeric values were causing an ``OverflowError`` (:issue:`18842`)
- Bug in :func:`DataFrame.to_parquet` where an exception was raised if the write destination is S3 (:issue:`19134`)
+- :class:`Interval` now supported in :func:`DataFrame.to_excel` for all Excel file types (:issue:`19242`)
+- :class:`Timedelta` now supported in :func:`DataFrame.to_excel` for xls file type (:issue:`19242`, :issue:`9155`)
-
Plotting
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 92b29c8da7e3f..b03987e933bff 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -4,7 +4,7 @@
# ---------------------------------------------------------------------
# ExcelFile class
-from datetime import datetime, date, time, MINYEAR
+from datetime import datetime, date, time, MINYEAR, timedelta
import os
import abc
@@ -21,7 +21,6 @@
from pandas.io.common import (_is_url, _urlopen, _validate_header_arg,
get_filepath_or_buffer, _NA_VALUES,
_stringify_path)
-from pandas.core.indexes.period import Period
import pandas._libs.json as json
from pandas.compat import (map, zip, reduce, range, lrange, u, add_metaclass,
string_types, OrderedDict)
@@ -777,17 +776,30 @@ def _pop_header_name(row, index_col):
def _conv_value(val):
- # Convert numpy types to Python types for the Excel writers.
+ """ Convert numpy types to Python types for the Excel writers.
+
+ Parameters
+ ----------
+ val : object
+ Value to be written into cells
+
+ Returns
+ -------
+ If val is a numpy int, float, or bool, then the equivalent Python
+ types are returned. :obj:`datetime`, :obj:`date`, and :obj:`timedelta`
+ are passed and formatting must be handled in the writer. :obj:`str`
+ representation is returned for all other types.
+ """
if is_integer(val):
val = int(val)
elif is_float(val):
val = float(val)
elif is_bool(val):
val = bool(val)
- elif isinstance(val, Period):
- val = "{val}".format(val=val)
- elif is_list_like(val):
- val = str(val)
+ elif isinstance(val, (datetime, date, timedelta)):
+ pass
+ else:
+ val = compat.to_str(val)
return val
@@ -1460,6 +1472,9 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
num_format_str = self.datetime_format
elif isinstance(cell.val, date):
num_format_str = self.date_format
+ elif isinstance(cell.val, timedelta):
+ delta = cell.val
+ val = delta.total_seconds() / float(86400)
stylekey = json.dumps(cell.style)
if num_format_str:
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index 3263f71dea3c3..efbabcfd8fc4c 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -2,7 +2,7 @@
import os
import sys
import warnings
-from datetime import datetime, date, time
+from datetime import datetime, date, time, timedelta
from distutils.version import LooseVersion
from functools import partial
from warnings import catch_warnings
@@ -1440,6 +1440,56 @@ def test_excel_date_datetime_format(self):
# to use df_expected to check the result
tm.assert_frame_equal(rs2, df_expected)
+ def test_to_excel_interval_no_labels(self):
+ # GH19242 - test writing Interval without labels
+ _skip_if_no_xlrd()
+
+ with ensure_clean(self.ext) as path:
+ frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
+ dtype=np.int64)
+ expected = frame.copy()
+ frame['new'] = pd.cut(frame[0], 10)
+ expected['new'] = pd.cut(expected[0], 10).astype(str)
+ frame.to_excel(path, 'test1')
+ reader = ExcelFile(path)
+ recons = read_excel(reader, 'test1')
+ tm.assert_frame_equal(expected, recons)
+
+ def test_to_excel_interval_labels(self):
+ # GH19242 - test writing Interval with labels
+ _skip_if_no_xlrd()
+
+ with ensure_clean(self.ext) as path:
+ frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
+ dtype=np.int64)
+ expected = frame.copy()
+ intervals = pd.cut(frame[0], 10, labels=['A', 'B', 'C', 'D', 'E',
+ 'F', 'G', 'H', 'I', 'J'])
+ frame['new'] = intervals
+ expected['new'] = pd.Series(list(intervals))
+ frame.to_excel(path, 'test1')
+ reader = ExcelFile(path)
+ recons = read_excel(reader, 'test1')
+ tm.assert_frame_equal(expected, recons)
+
+ def test_to_excel_timedelta(self):
+ # GH 19242, GH9155 - test writing timedelta to xls
+ _skip_if_no_xlrd()
+
+ with ensure_clean('.xls') as path:
+ frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
+ columns=['A'],
+ dtype=np.int64
+ )
+ expected = frame.copy()
+ frame['new'] = frame['A'].apply(lambda x: timedelta(seconds=x))
+ expected['new'] = expected['A'].apply(
+ lambda x: timedelta(seconds=x).total_seconds() / float(86400))
+ frame.to_excel(path, 'test1')
+ reader = ExcelFile(path)
+ recons = read_excel(reader, 'test1')
+ tm.assert_frame_equal(expected, recons)
+
def test_to_excel_periodindex(self):
_skip_if_no_xlrd()
| - [x] closes #19242
closes #9155
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19244 | 2018-01-15T04:08:46Z | 2018-01-27T16:56:34Z | 2018-01-27T16:56:33Z | 2018-01-27T16:56:50Z |
BUG: Fixes rounding error in Timestamp.floor() | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index ca625f492b61f..7800a7c391940 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -551,6 +551,7 @@ Datetimelike
- Bug in :func:`~DataFrame.pct_change` using ``periods`` and ``freq`` returned different length outputs (:issue:`7292`)
- Bug in comparison of :class:`DatetimeIndex` against ``None`` or ``datetime.date`` objects raising ``TypeError`` for ``==`` and ``!=`` comparisons instead of all-``False`` and all-``True``, respectively (:issue:`19301`)
- Bug in :class:`Timestamp` and :func:`to_datetime` where a string representing a barely out-of-bounds timestamp would be incorrectly rounded down instead of raising ``OutOfBoundsDatetime`` (:issue:`19382`)
+- Bug in :func:`Timestamp.floor` :func:`DatetimeIndex.floor` where time stamps far in the future and past were not rounded correctly (:issue:`19206`)
-
Timezones
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index b9be9c16eb6c3..6d91a96e32db7 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -59,6 +59,46 @@ cdef inline object create_timestamp_from_ts(int64_t value,
return ts_base
+def round_ns(values, rounder, freq):
+ """
+ Applies rounding function at given frequency
+
+ Parameters
+ ----------
+ values : int, :obj:`ndarray`
+ rounder : function
+ freq : str, obj
+
+ Returns
+ -------
+ int or :obj:`ndarray`
+ """
+ from pandas.tseries.frequencies import to_offset
+ unit = to_offset(freq).nanos
+ if unit < 1000:
+ # for nano rounding, work with the last 6 digits separately
+ # due to float precision
+ buff = 1000000
+ r = (buff * (values // buff) + unit *
+ (rounder((values % buff) * (1 / float(unit)))).astype('i8'))
+ else:
+ if unit % 1000 != 0:
+ msg = 'Precision will be lost using frequency: {}'
+ warnings.warn(msg.format(freq))
+
+ # GH19206
+ # to deal with round-off when unit is large
+ if unit >= 1e9:
+ divisor = 10 ** int(np.log10(unit / 1e7))
+ else:
+ divisor = 10
+
+ r = (unit * rounder((values * (divisor / float(unit))) / divisor)
+ .astype('i8'))
+
+ return r
+
+
# This is PITA. Because we inherit from datetime, which has very specific
# construction requirements, we need to do object instantiation in python
# (see Timestamp class above). This will serve as a C extension type that
@@ -590,28 +630,12 @@ class Timestamp(_Timestamp):
return create_timestamp_from_ts(ts.value, ts.dts, ts.tzinfo, freq)
def _round(self, freq, rounder):
-
- cdef:
- int64_t unit, r, value, buff = 1000000
- object result
-
- from pandas.tseries.frequencies import to_offset
- unit = to_offset(freq).nanos
if self.tz is not None:
value = self.tz_localize(None).value
else:
value = self.value
- if unit < 1000 and unit % 1000 != 0:
- # for nano rounding, work with the last 6 digits separately
- # due to float precision
- r = (buff * (value // buff) + unit *
- (rounder((value % buff) / float(unit))).astype('i8'))
- elif unit >= 1000 and unit % 1000 != 0:
- msg = 'Precision will be lost using frequency: {}'
- warnings.warn(msg.format(freq))
- r = (unit * rounder(value / float(unit)).astype('i8'))
- else:
- r = (unit * rounder(value / float(unit)).astype('i8'))
+
+ r = round_ns(value, rounder, freq)
result = Timestamp(r, unit='ns')
if self.tz is not None:
result = result.tz_localize(self.tz)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 8e77c7a7fa48c..4a526955d9bf4 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -36,6 +36,7 @@
from pandas._libs import lib, iNaT, NaT
from pandas._libs.tslibs.period import Period
from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds
+from pandas._libs.tslibs.timestamps import round_ns
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.util._decorators import Appender, cache_readonly
@@ -90,23 +91,9 @@ class TimelikeOps(object):
""")
def _round(self, freq, rounder):
-
- from pandas.tseries.frequencies import to_offset
- unit = to_offset(freq).nanos
# round the local times
values = _ensure_datetimelike_to_i8(self)
- if unit < 1000 and unit % 1000 != 0:
- # for nano rounding, work with the last 6 digits separately
- # due to float precision
- buff = 1000000
- result = (buff * (values // buff) + unit *
- (rounder((values % buff) / float(unit))).astype('i8'))
- elif unit >= 1000 and unit % 1000 != 0:
- msg = 'Precision will be lost using frequency: {}'
- warnings.warn(msg.format(freq))
- result = (unit * rounder(values / float(unit)).astype('i8'))
- else:
- result = (unit * rounder(values / float(unit)).astype('i8'))
+ result = round_ns(values, rounder, freq)
result = self._maybe_mask_results(result, fill_value=NaT)
attribs = self._get_attributes_dict()
diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py
index 111f68ba14775..83e7a0cd68d63 100644
--- a/pandas/tests/indexes/datetimes/test_scalar_compat.py
+++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py
@@ -126,6 +126,27 @@ def test_round(self, tz):
ts = '2016-10-17 12:00:00.001501031'
DatetimeIndex([ts]).round('1010ns')
+ @pytest.mark.parametrize('test_input, rounder, freq, expected', [
+ (['2117-01-01 00:00:45'], 'floor', '15s', ['2117-01-01 00:00:45']),
+ (['2117-01-01 00:00:45'], 'ceil', '15s', ['2117-01-01 00:00:45']),
+ (['2117-01-01 00:00:45.000000012'], 'floor', '10ns',
+ ['2117-01-01 00:00:45.000000010']),
+ (['1823-01-01 00:00:01.000000012'], 'ceil', '10ns',
+ ['1823-01-01 00:00:01.000000020']),
+ (['1823-01-01 00:00:01'], 'floor', '1s', ['1823-01-01 00:00:01']),
+ (['1823-01-01 00:00:01'], 'ceil', '1s', ['1823-01-01 00:00:01']),
+ (('NaT', '1823-01-01 00:00:01'), 'floor', '1s',
+ ('NaT', '1823-01-01 00:00:01')),
+ (('NaT', '1823-01-01 00:00:01'), 'ceil', '1s',
+ ('NaT', '1823-01-01 00:00:01'))
+ ])
+ def test_ceil_floor_edge(self, tz, test_input, rounder, freq, expected):
+ dt = DatetimeIndex(list(test_input))
+ func = getattr(dt, rounder)
+ result = func(freq)
+ expected = DatetimeIndex(list(expected))
+ assert expected.equals(result)
+
# ----------------------------------------------------------------
# DatetimeIndex.normalize
diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py
index 70c7308dd3991..8a6989c909cb2 100644
--- a/pandas/tests/scalar/timestamp/test_unary_ops.py
+++ b/pandas/tests/scalar/timestamp/test_unary_ops.py
@@ -10,7 +10,7 @@
from pandas.compat import PY3
from pandas._libs.tslibs.frequencies import _INVALID_FREQ_ERROR
-from pandas import Timestamp
+from pandas import Timestamp, NaT
class TestTimestampUnaryOps(object):
@@ -93,6 +93,29 @@ def test_round_frequencies(self, freq, expected):
result = stamp.round(freq=freq)
assert result == expected
+ @pytest.mark.parametrize('test_input, rounder, freq, expected', [
+ ('2117-01-01 00:00:45', 'floor', '15s', '2117-01-01 00:00:45'),
+ ('2117-01-01 00:00:45', 'ceil', '15s', '2117-01-01 00:00:45'),
+ ('2117-01-01 00:00:45.000000012', 'floor', '10ns',
+ '2117-01-01 00:00:45.000000010'),
+ ('1823-01-01 00:00:01.000000012', 'ceil', '10ns',
+ '1823-01-01 00:00:01.000000020'),
+ ('1823-01-01 00:00:01', 'floor', '1s', '1823-01-01 00:00:01'),
+ ('1823-01-01 00:00:01', 'ceil', '1s', '1823-01-01 00:00:01'),
+ ('NaT', 'floor', '1s', 'NaT'),
+ ('NaT', 'ceil', '1s', 'NaT')
+ ])
+ def test_ceil_floor_edge(self, test_input, rounder, freq, expected):
+ dt = Timestamp(test_input)
+ func = getattr(dt, rounder)
+ result = func(freq)
+
+ if dt is NaT:
+ assert result is NaT
+ else:
+ expected = Timestamp(expected)
+ assert result == expected
+
def test_ceil(self):
dt = Timestamp('20130101 09:10:11')
result = dt.ceil('D')
| - [x] closes #19206
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19240 | 2018-01-14T23:53:18Z | 2018-02-07T15:25:39Z | 2018-02-07T15:25:38Z | 2018-02-07T15:30:33Z |
DOC: update the pandas.DataFrame.notna and pandas.Series.notna docstring | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c95d7cacf8c97..bfb251b0995ec 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5591,7 +5591,7 @@ def asof(self, where, subset=None):
NA values, such as None or :attr:`numpy.NaN`, gets mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
- strings `''` or :attr:`numpy.inf` are not considered NA values
+ strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
@@ -5653,14 +5653,63 @@ def isnull(self):
return isna(self).__finalize__(self)
_shared_docs['notna'] = """
- Return a boolean same-sized object indicating if the values are
- not NA.
+ Detect existing (non-missing) values.
+
+ Return a boolean same-sized object indicating if the values are not NA.
+ Non-missing values get mapped to True. Characters such as empty
+ strings ``''`` or :attr:`numpy.inf` are not considered NA values
+ (unless you set ``pandas.options.mode.use_inf_as_na = True``).
+ NA values, such as None or :attr:`numpy.NaN`, get mapped to False
+ values.
+
+ Returns
+ -------
+ %(klass)s
+ Mask of bool values for each element in %(klass)s that
+ indicates whether an element is not an NA value.
See Also
--------
- %(klass)s.isna : boolean inverse of notna
%(klass)s.notnull : alias of notna
+ %(klass)s.isna : boolean inverse of notna
+ %(klass)s.dropna : omit axes labels with missing values
notna : top-level notna
+
+ Examples
+ --------
+ Show which entries in a DataFrame are not NA.
+
+ >>> df = pd.DataFrame({'age': [5, 6, np.NaN],
+ ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
+ ... pd.Timestamp('1940-04-25')],
+ ... 'name': ['Alfred', 'Batman', ''],
+ ... 'toy': [None, 'Batmobile', 'Joker']})
+ >>> df
+ age born name toy
+ 0 5.0 NaT Alfred None
+ 1 6.0 1939-05-27 Batman Batmobile
+ 2 NaN 1940-04-25 Joker
+
+ >>> df.notna()
+ age born name toy
+ 0 True False True False
+ 1 True True True True
+ 2 False True True True
+
+ Show which entries in a Series are not NA.
+
+ >>> ser = pd.Series([5, 6, np.NaN])
+ >>> ser
+ 0 5.0
+ 1 6.0
+ 2 NaN
+ dtype: float64
+
+ >>> ser.notna()
+ 0 True
+ 1 True
+ 2 False
+ dtype: bool
"""
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
| Checklist for the pandas documentation sprint (ignore this if you are doing
an unrelated PR):
- [x] PR title is "DOC: update the <your-function-or-method> docstring"
- [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>`
- [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] The html version looks good: `python doc/make.py --single <your-function-or-method>`
- [x] It has been proofread on language by another sprint participant
Two Validations for pandas.DataFrame.notna and pandas.Series.notna (shared docs).
```
################################################################################
###################### Docstring (pandas.DataFrame.notna) ######################
################################################################################
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Characters such as empty
strings `''` or :attr:`numpy.inf` are not considered NA values
(unless you set :attr:`pandas.options.mode.use_inf_as_na` `= True`).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
bool of type DataFrame
Mask of True/False values for each element in DataFrame that
indicates whether an element is not an NA value
See Also
--------
DataFrame.notnull : alias of notna
DataFrame.isna : boolean inverse of notna
DataFrame.dropna : omit axes labels with missing values
notna : top-level notna
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.DataFrame.notna" correct. :)
```
```
################################################################################
####################### Docstring (pandas.Series.notna) #######################
################################################################################
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Characters such as empty
strings `''` or :attr:`numpy.inf` are not considered NA values
(unless you set :attr:`pandas.options.mode.use_inf_as_na` `= True`).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
bool of type Series
Mask of True/False values for each element in Series that
indicates whether an element is not an NA value
See Also
--------
Series.notnull : alias of notna
Series.isna : boolean inverse of notna
Series.dropna : omit axes labels with missing values
notna : top-level notna
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.Series.notna" correct. :)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/20160 | 2018-03-10T14:40:30Z | 2018-03-13T13:49:52Z | 2018-03-13T13:49:52Z | 2018-03-14T11:57:19Z |
DOC: update the pandas.Series.dt.total_seconds docstring | diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 969afccdbc755..b5a08fc0168e4 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -500,7 +500,57 @@ def f(x):
def total_seconds(self):
"""
- Total duration of each element expressed in seconds.
+ Return total duration of each element expressed in seconds.
+
+ This method is available directly on TimedeltaIndex and on Series
+ containing timedelta values under the ``.dt`` namespace.
+
+ Returns
+ -------
+ seconds : Float64Index or Series
+ When the calling object is a TimedeltaIndex, the return type is a
+ Float64Index. When the calling object is a Series, the return type
+ is Series of type `float64` whose index is the same as the
+ original.
+
+ See Also
+ --------
+ datetime.timedelta.total_seconds : Standard library version
+ of this method.
+ TimedeltaIndex.components : Return a DataFrame with components of
+ each Timedelta.
+
+ Examples
+ --------
+ **Series**
+
+ >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d'))
+ >>> s
+ 0 0 days
+ 1 1 days
+ 2 2 days
+ 3 3 days
+ 4 4 days
+ dtype: timedelta64[ns]
+
+ >>> s.dt.total_seconds()
+ 0 0.0
+ 1 86400.0
+ 2 172800.0
+ 3 259200.0
+ 4 345600.0
+ dtype: float64
+
+ **TimedeltaIndex**
+
+ >>> idx = pd.to_timedelta(np.arange(5), unit='d')
+ >>> idx
+ TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
+ dtype='timedelta64[ns]', freq=None)
+
+ >>> idx.total_seconds()
+ Float64Index([0.0, 86400.0, 172800.0, 259200.00000000003, 345600.0],
+ dtype='float64')
"""
return Index(self._maybe_mask_results(1e-9 * self.asi8),
name=self.name)
| Checklist for the pandas documentation sprint (ignore this if you are doing
an unrelated PR):
- [X] PR title is "DOC: update the <your-function-or-method> docstring"
- [X] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>`
- [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] The html version looks good: `python doc/make.py --single <your-function-or-method>`
- [X] It has been proofread on language by another sprint participant
Please include the output of the validation script below between the "```" ticks:
```
################################################################################
################## Docstring (pandas.Series.dt.total_seconds) ##################
################################################################################
Return total duration of each element expressed in seconds.
Return a series with the same length and index as the original,
containing the length of each element expressed in seconds.
Returns
-------
s : pandas.Series
a series of type `float64`.
Examples
--------
>>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d'))
>>> s
0 0 days
1 1 days
2 2 days
3 3 days
4 4 days
dtype: timedelta64[ns]
>>> s.dt.total_seconds()
0 0.0
1 86400.0
2 172800.0
3 259200.0
4 345600.0
dtype: float64
################################################################################
################################## Validation ##################################
################################################################################
Errors found:
Errors in parameters section
Parameters {'args', 'kwargs'} not documented
See Also section not found
```
If the validation script still gives errors, but you think there is a good reason
to deviate in this case (and there are certainly such cases), please state this
explicitly.
Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint):
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/20159 | 2018-03-10T14:37:47Z | 2018-03-13T14:00:51Z | 2018-03-13T14:00:51Z | 2018-03-13T20:08:59Z |
DOC: update the pandas.DataFrame.plot.bar docsctring | diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 2c2521ad50ce0..0c2560abb0165 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -2835,19 +2835,86 @@ def line(self, x=None, y=None, **kwds):
def bar(self, x=None, y=None, **kwds):
"""
- Vertical bar plot
+ Vertical bar plot.
+
+ A bar plot is a plot that presents categorical data with
+ rectangular bars with lengths proportional to the values that they
+ represent. A bar plot shows comparisons among discrete categories. One
+ axis of the plot shows the specific categories being compared, and the
+ other axis represents a measured value.
Parameters
----------
- x, y : label or position, optional
- Coordinates for each point.
- `**kwds` : optional
+ x : label or position, optional
+ Allows plotting of one column versus another. If not specified,
+ the index of the DataFrame is used.
+ y : label or position, optional
+ Allows plotting of one column versus another. If not specified,
+ all numerical columns are used.
+ **kwds
Additional keyword arguments are documented in
:meth:`pandas.DataFrame.plot`.
Returns
-------
- axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
+ axes : matplotlib.axes.Axes or np.ndarray of them
+ An ndarray is returned with one :class:`matplotlib.axes.Axes`
+ per column when ``subplots=True``.
+
+ See Also
+ --------
+ pandas.DataFrame.plot.barh : Horizontal bar plot.
+ pandas.DataFrame.plot : Make plots of a DataFrame.
+ matplotlib.pyplot.bar : Make a bar plot with matplotlib.
+
+ Examples
+ --------
+ Basic plot.
+
+ .. plot::
+ :context: close-figs
+
+ >>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
+ >>> ax = df.plot.bar(x='lab', y='val', rot=0)
+
+ Plot a whole dataframe to a bar plot. Each column is assigned a
+ distinct color, and each row is nested in a group along the
+ horizontal axis.
+
+ .. plot::
+ :context: close-figs
+
+ >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
+ >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
+ >>> index = ['snail', 'pig', 'elephant',
+ ... 'rabbit', 'giraffe', 'coyote', 'horse']
+ >>> df = pd.DataFrame({'speed': speed,
+ ... 'lifespan': lifespan}, index=index)
+ >>> ax = df.plot.bar(rot=0)
+
+ Instead of nesting, the figure can be split by column with
+ ``subplots=True``. In this case, a :class:`numpy.ndarray` of
+ :class:`matplotlib.axes.Axes` are returned.
+
+ .. plot::
+ :context: close-figs
+
+ >>> axes = df.plot.bar(rot=0, subplots=True)
+ >>> axes[1].legend(loc=2) # doctest: +SKIP
+
+ Plot a single column.
+
+ .. plot::
+ :context: close-figs
+
+ >>> ax = df.plot.bar(y='speed', rot=0)
+
+ Plot only selected categories for the DataFrame.
+
+ .. plot::
+ :context: close-figs
+
+ >>> ax = df.plot.bar(x='lifespan', rot=0)
"""
return self(kind='bar', x=x, y=y, **kwds)
| Checklist for the pandas documentation sprint (ignore this if you are doing
an unrelated PR):
- [x] PR title is "DOC: update the <your-function-or-method> docstring"
- [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>`
- [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] The html version looks good: `python doc/make.py --single <your-function-or-method>`
- [x] It has been proofread on language by another sprint participant
Please include the output of the validation script below between the "```" ticks:
```
################################################################################
#################### Docstring (pandas.DataFrame.plot.bar) ####################
################################################################################
Vertical bar plot.
A bar plot is a plot that presents categorical data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, default None
Allows plotting of one column versus another.
y : label or position, default None
Allows plotting of one column versus another.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
Examples
--------
Basic plot
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A','B','C'], 'val':[10,30,20]})
>>> ax = df.plot.bar(x='lab',y='val')
Plot a whole dataframe to a bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar()
Plot a column of the dataframe to a bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar(y='speed')
Plot only selected categories for the dataframe
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar(x='lifespan')
See Also
--------
pandas.DataFrame.plot.barh : Horizontal bar plot.
pandas.DataFrame.plot : Make plots of DataFrame using matplotlib.
matplotlib.pyplot.bar : Make a bar plot.
################################################################################
################################## Validation ##################################
################################################################################
Errors found:
Errors in parameters section
Parameters {'kwds'} not documented
Unknown parameters {'**kwds'}
```
If the validation script still gives errors, but you think there is a good reason
to deviate in this case (and there are certainly such cases), please state this
explicitly.
Small issue with **kwds docstring
| https://api.github.com/repos/pandas-dev/pandas/pulls/20158 | 2018-03-10T14:35:56Z | 2018-03-13T10:47:52Z | 2018-03-13T10:47:52Z | 2018-03-13T10:49:04Z |
DOC: Improved the docstring of pandas.plotting._core.FramePlotMethods… | diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index f587cb91ab932..a791822042fbe 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -1409,7 +1409,7 @@ def orientation(self):
Returns
-------
- axes : matplotlib.AxesSubplot or np.array of them
+ axes : matplotlib.axes.Axes or numpy.ndarray of them
See Also
--------
@@ -1917,7 +1917,7 @@ def _plot(data, x=None, y=None, subplots=False,
Returns
-------
- axes : matplotlib.AxesSubplot or np.array of them
+ axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Notes
-----
@@ -2581,7 +2581,7 @@ def line(self, **kwds):
Returns
-------
- axes : matplotlib.AxesSubplot or np.array of them
+ axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Examples
--------
@@ -2606,7 +2606,7 @@ def bar(self, **kwds):
Returns
-------
- axes : matplotlib.AxesSubplot or np.array of them
+ axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
"""
return self(kind='bar', **kwds)
@@ -2622,7 +2622,7 @@ def barh(self, **kwds):
Returns
-------
- axes : matplotlib.AxesSubplot or np.array of them
+ axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
"""
return self(kind='barh', **kwds)
@@ -2638,7 +2638,7 @@ def box(self, **kwds):
Returns
-------
- axes : matplotlib.AxesSubplot or np.array of them
+ axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
"""
return self(kind='box', **kwds)
@@ -2656,7 +2656,7 @@ def hist(self, bins=10, **kwds):
Returns
-------
- axes : matplotlib.AxesSubplot or np.array of them
+ axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
"""
return self(kind='hist', bins=bins, **kwds)
@@ -2715,7 +2715,7 @@ def area(self, **kwds):
Returns
-------
- axes : matplotlib.AxesSubplot or np.array of them
+ axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
"""
return self(kind='area', **kwds)
@@ -2731,7 +2731,7 @@ def pie(self, **kwds):
Returns
-------
- axes : matplotlib.AxesSubplot or np.array of them
+ axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
"""
return self(kind='pie', **kwds)
@@ -2783,7 +2783,7 @@ def line(self, x=None, y=None, **kwds):
Returns
-------
- axes : matplotlib.AxesSubplot or np.array of them
+ axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
"""
return self(kind='line', x=x, y=y, **kwds)
@@ -2801,25 +2801,87 @@ def bar(self, x=None, y=None, **kwds):
Returns
-------
- axes : matplotlib.AxesSubplot or np.array of them
+ axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
"""
return self(kind='bar', x=x, y=y, **kwds)
def barh(self, x=None, y=None, **kwds):
"""
- Horizontal bar plot
+ Make a horizontal bar plot.
+
+ A horizontal bar plot is a plot that presents quantitative data with
+ rectangular bars with lengths proportional to the values that they
+ represent. A bar plot shows comparisons among discrete categories. One
+ axis of the plot shows the specific categories being compared, and the
+ other axis represents a measured value.
Parameters
----------
- x, y : label or position, optional
- Coordinates for each point.
- `**kwds` : optional
- Additional keyword arguments are documented in
- :meth:`pandas.DataFrame.plot`.
+ x : label or position, default DataFrame.index
+ Column to be used for categories.
+ y : label or position, default All numeric columns in dataframe
+ Columns to be plotted from the DataFrame.
+ **kwds
+ Keyword arguments to pass on to :meth:`pandas.DataFrame.plot`.
Returns
-------
- axes : matplotlib.AxesSubplot or np.array of them
+ axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them.
+
+ See Also
+ --------
+ pandas.DataFrame.plot.bar: Vertical bar plot.
+ pandas.DataFrame.plot : Make plots of DataFrame using matplotlib.
+ matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
+
+ Examples
+ --------
+ Basic example
+
+ .. plot::
+ :context: close-figs
+
+ >>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
+ >>> ax = df.plot.barh(x='lab', y='val')
+
+ Plot a whole DataFrame to a horizontal bar plot
+
+ .. plot::
+ :context: close-figs
+
+ >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
+ >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
+ >>> index = ['snail', 'pig', 'elephant',
+ ... 'rabbit', 'giraffe', 'coyote', 'horse']
+ >>> df = pd.DataFrame({'speed': speed,
+ ... 'lifespan': lifespan}, index=index)
+ >>> ax = df.plot.barh()
+
+ Plot a column of the DataFrame to a horizontal bar plot
+
+ .. plot::
+ :context: close-figs
+
+ >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
+ >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
+ >>> index = ['snail', 'pig', 'elephant',
+ ... 'rabbit', 'giraffe', 'coyote', 'horse']
+ >>> df = pd.DataFrame({'speed': speed,
+ ... 'lifespan': lifespan}, index=index)
+ >>> ax = df.plot.barh(y='speed')
+
+ Plot DataFrame versus the desired column
+
+ .. plot::
+ :context: close-figs
+
+ >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
+ >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
+ >>> index = ['snail', 'pig', 'elephant',
+ ... 'rabbit', 'giraffe', 'coyote', 'horse']
+ >>> df = pd.DataFrame({'speed': speed,
+ ... 'lifespan': lifespan}, index=index)
+ >>> ax = df.plot.barh(x='lifespan')
"""
return self(kind='barh', x=x, y=y, **kwds)
@@ -2837,7 +2899,7 @@ def box(self, by=None, **kwds):
Returns
-------
- axes : matplotlib.AxesSubplot or np.array of them
+ axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
"""
return self(kind='box', by=by, **kwds)
@@ -2857,7 +2919,7 @@ def hist(self, by=None, bins=10, **kwds):
Returns
-------
- axes : matplotlib.AxesSubplot or np.array of them
+ axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
"""
return self(kind='hist', by=by, bins=bins, **kwds)
@@ -2921,7 +2983,7 @@ def area(self, x=None, y=None, **kwds):
Returns
-------
- axes : matplotlib.AxesSubplot or np.array of them
+ axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
"""
return self(kind='area', x=x, y=y, **kwds)
@@ -2939,7 +3001,7 @@ def pie(self, y=None, **kwds):
Returns
-------
- axes : matplotlib.AxesSubplot or np.array of them
+ axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
"""
return self(kind='pie', y=y, **kwds)
@@ -2961,7 +3023,7 @@ def scatter(self, x, y, s=None, c=None, **kwds):
Returns
-------
- axes : matplotlib.AxesSubplot or np.array of them
+ axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
"""
return self(kind='scatter', x=x, y=y, c=c, s=s, **kwds)
@@ -2987,7 +3049,7 @@ def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None,
Returns
-------
- axes : matplotlib.AxesSubplot or np.array of them
+ axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
"""
if reduce_C_function is not None:
kwds['reduce_C_function'] = reduce_C_function
| ….barh()
- Added examples section
- Added extended summary
- Added argument explanation
Checklist for the pandas documentation sprint (ignore this if you are doing
an unrelated PR):
- [x] PR title is "DOC: update the <your-function-or-method> docstring"
- [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>`
- [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] The html version looks good: `python doc/make.py --single <your-function-or-method>`
- [x] It has been proofread on language by another sprint participant
Please include the output of the validation script below between the "```" ticks:
```
################################################################################
########### Docstring (pandas.plotting._core.FramePlotMethods.barh) ###########
################################################################################
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents categorical data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, optional
Column to be used for categories.
y : label or position, optional
Columns to be plotted from the DataFrame.
kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
See Also
--------
pandas.DataFrame.plot.bar: Vertical bar plot
pandas.DataFrame.plot : Make plots of DataFrame using matplotlib.
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
Examples
--------
Basic example
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A','B','C'], 'val':[10,30,20]})
>>> ax = df.plot.barh(x='lab',y='val')
Plot a whole DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh()
Plot a column of the DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(y='speed')
Plot DataFrame versus the desired column
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(x='lifespan')
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.plotting._core.FramePlotMethods.barh" correct. :)
```
If the validation script still gives errors, but you think there is a good reason
to deviate in this case (and there are certainly such cases), please state this
explicitly.
Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint):
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/20157 | 2018-03-10T14:34:13Z | 2018-03-11T21:54:28Z | 2018-03-11T21:54:28Z | 2018-03-12T09:33:55Z |
DOC: update the pandas.DataFrame.plot.hist docstring | diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 2c2521ad50ce0..2da9ad597a0bc 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -2951,21 +2951,47 @@ def box(self, by=None, **kwds):
def hist(self, by=None, bins=10, **kwds):
"""
- Histogram
+ Draw one histogram of the DataFrame's columns.
+
+ A histogram is a representation of the distribution of data.
+ This function groups the values of all given Series in the DataFrame
+        into bins, and draws all bins in only one :class:`matplotlib.axes.Axes`.
+ This is useful when the DataFrame's Series are in a similar scale.
Parameters
----------
- by : string or sequence
+ by : str or sequence, optional
Column in the DataFrame to group by.
- bins: integer, default 10
- Number of histogram bins to be used
- `**kwds` : optional
+ bins : int, default 10
+ Number of histogram bins to be used.
+ **kwds
Additional keyword arguments are documented in
:meth:`pandas.DataFrame.plot`.
Returns
-------
- axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
+ axes : matplotlib.AxesSubplot histogram.
+
+ See Also
+ --------
+ DataFrame.hist : Draw histograms per DataFrame's Series.
+ Series.hist : Draw a histogram with Series' data.
+
+ Examples
+ --------
+ When we draw a dice 6000 times, we expect to get each value around 1000
+ times. But when we draw two dices and sum the result, the distribution
+ is going to be quite different. A histogram illustrates those
+ distributions.
+
+ .. plot::
+ :context: close-figs
+
+ >>> df = pd.DataFrame(
+ ... np.random.randint(1, 7, 6000),
+ ... columns = ['one'])
+ >>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
+ >>> ax = df.plot.hist(bins=12, alpha=0.5)
"""
return self(kind='hist', by=by, bins=bins, **kwds)
| Checklist for the pandas documentation sprint (ignore this if you are doing
an unrelated PR):
- [x] PR title is "DOC: update the <your-function-or-method> docstring"
- [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>`
- [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] The html version looks good: `python doc/make.py --single <your-function-or-method>`
- [x] It has been proofread on language by another sprint participant
Please include the output of the validation script below between the "```" ticks:
```
################################################################################
#################### Docstring (pandas.DataFrame.plot.hist) ####################
################################################################################
Draw one histogram of the DataFrame's Series using matplotlib.
A histogram is a representation of the distribution of data.
This function groups the values of all given Series in the DataFrame
into bins, and draws all bins in only one matplotlib.AxesSubplot. This
is useful when the DataFrame's Series are in a similar scale.
Parameters
----------
by : str
Column in the DataFrame to group by.
bins : int, default 10
Number of histogram bins to be used.
**kwds : optional
Parameters to pass on to :meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
See Also
--------
:meth:`pandas.DataFrame.hist` : Draw histograms per DataFrame's Series.
:meth:`pandas.Series.hist` : Draw a histogram with Series' data.
Examples
--------
When using values between 0 and 3, calling hist() with bins = 3 will
create three bins: one that groups values between 0 and 1, another
for values between 1 and 2, and another for values between 2 and 3.
We use alpha parameter to be able to see overlapping columns.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'length': [ 1.5, 0.5, 1.2, 0.9, 3],
... 'width': [ 0.7, 0.2, 0.15, 0.2, 1.1]
... }, index= ['pig', 'rabbit', 'duck', 'chicken', 'horse'])
>>> hist = df.plot.hist(bins = 3, xticks = range(4), alpha = 0.5)
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.DataFrame.plot.hist" correct. :)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/20155 | 2018-03-10T14:33:04Z | 2018-03-19T20:57:20Z | 2018-03-19T20:57:20Z | 2018-03-19T20:57:39Z |
DOC: Update the pandas.Series.str.count() docstring (Delhi) | diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index fac607f4621a8..59d67b4088505 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -202,15 +202,65 @@ def str_count(arr, pat, flags=0):
"""
Count occurrences of pattern in each string of the Series/Index.
+ This function is used to count the number of times a particular regex
+ pattern is repeated in each of the string elements of the
+ :class:`~pandas.Series`.
+
Parameters
----------
- pat : string, valid regular expression
- flags : int, default 0 (no flags)
- re module flags, e.g. re.IGNORECASE
+ pat : str
+ Valid regular expression.
+ flags : int, default 0, meaning no flags
+ Flags for the `re` module. For a complete list, `see here
+ <https://docs.python.org/3/howto/regex.html#compilation-flags>`_.
+ **kwargs
+        For compatibility with other string methods. Not used.
Returns
-------
- counts : Series/Index of integer values
+ counts : Series or Index
+ Same type as the calling object containing the integer counts.
+
+ Notes
+ -----
+ Some characters need to be escaped when passing in `pat`.
+ eg. ``'$'`` has a special meaning in regex and must be escaped when
+ finding this literal character.
+
+ See Also
+ --------
+ re : Standard library module for regular expressions.
+ str.count : Standard library version, without regular expression support.
+
+ Examples
+ --------
+ >>> s = pd.Series(['A', 'B', 'Aaba', 'Baca', np.nan, 'CABA', 'cat'])
+ >>> s.str.count('a')
+ 0 0.0
+ 1 0.0
+ 2 2.0
+ 3 2.0
+ 4 NaN
+ 5 0.0
+ 6 1.0
+ dtype: float64
+
+ Escape ``'$'`` to find the literal dollar sign.
+
+ >>> s = pd.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat'])
+ >>> s.str.count('\$')
+ 0 1
+ 1 0
+ 2 1
+ 3 2
+ 4 2
+ 5 0
+ dtype: int64
+
+    This is also available on Index.
+
+ >>> pd.Index(['A', 'A', 'Aaba', 'cat']).str.count('a')
+ Int64Index([0, 0, 2, 1], dtype='int64')
"""
regex = re.compile(pat, flags=flags)
f = lambda x: len(regex.findall(x))
| Checklist for the pandas documentation sprint (ignore this if you are doing
an unrelated PR):
- [x] PR title is "DOC: update the <your-function-or-method> docstring"
- [ ] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>`
- [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] The html version looks good: `python doc/make.py --single <your-function-or-method>`
- [x] It has been proofread on language by another sprint participant
Please include the output of the validation script below between the "```" ticks:
```
################################################################################
##################### Docstring (pandas.Series.str.count) #####################
################################################################################
Count occurrences of pattern in each string of the Series/Index.
This function is used to count the number of times a particular regex
pattern is repeated in each of the string elements of the
:class:`~pandas.Series`.
Parameters
----------
pat : str
Valid regular expression.
flags : int, default 0, meaning no flags
Flags for re module, e.g. re.IGNORECASE.
Returns
-------
counts : Series/Index of integer values
Notes
-----
Some characters need to be escaped when passing in pat.
eg. '$' has a special meaning in regex and must be escaped when finding
specifically this char.
Examples
--------
Take a look at
`this link <https://docs.python.org/3/howto/regex.html#compilation-flags>`_
for the list of all possible flags that can be used.
>>> s = pd.Series(['A', 'B', 'Aaba', 'Baca', np.nan, 'CABA', 'cat'])
>>> s.str.count('a')
0 0.0
1 0.0
2 2.0
3 2.0
4 NaN
5 0.0
6 1.0
dtype: float64
################################################################################
################################## Validation ##################################
################################################################################
Errors found:
Errors in parameters section
Parameters {'kwargs'} not documented
See Also section not found
```
As discussed in the gitter channel it was suggested to ignore kwargs for now.
Not sure what should be a part of see also here. | https://api.github.com/repos/pandas-dev/pandas/pulls/20154 | 2018-03-10T14:28:29Z | 2018-03-14T18:29:54Z | 2018-03-14T18:29:54Z | 2018-03-19T11:26:36Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.