title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
ASV: add tests for indexing engines and Uint64Engine | diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py
index 8290731fd7eea..49d6311a7bb66 100644
--- a/asv_bench/benchmarks/indexing.py
+++ b/asv_bench/benchmarks/indexing.py
@@ -2,8 +2,9 @@
import numpy as np
import pandas.util.testing as tm
-from pandas import (Series, DataFrame, Panel, MultiIndex, Int64Index,
- Float64Index, IntervalIndex, CategoricalIndex,
+from pandas import (Series, DataFrame, Panel, MultiIndex,
+ Int64Index, UInt64Index, Float64Index,
+ IntervalIndex, CategoricalIndex,
IndexSlice, concat, date_range)
@@ -11,7 +12,7 @@ class NumericSeriesIndexing(object):
goal_time = 0.2
params = [
- (Int64Index, Float64Index),
+ (Int64Index, UInt64Index, Float64Index),
('unique_monotonic_inc', 'nonunique_monotonic_inc'),
]
param_names = ['index_dtype', 'index_structure']
diff --git a/asv_bench/benchmarks/indexing_engines.py b/asv_bench/benchmarks/indexing_engines.py
new file mode 100644
index 0000000000000..243f2ada7be32
--- /dev/null
+++ b/asv_bench/benchmarks/indexing_engines.py
@@ -0,0 +1,54 @@
+import numpy as np
+
+from pandas._libs.index import (Int64Engine, UInt64Engine, Float64Engine,
+ ObjectEngine)
+
+
+class NumericEngineIndexing(object):
+
+ goal_time = 0.2
+ params = [[Int64Engine, UInt64Engine, Float64Engine],
+ [np.int64, np.uint64, np.float64],
+ ['monotonic_incr', 'monotonic_decr', 'non_monotonic'],
+ ]
+ param_names = ['engine', 'dtype', 'index_type']
+
+ def setup(self, engine, dtype, index_type):
+ N = 10**5
+ values = list([1] * N + [2] * N + [3] * N)
+ arr = {
+ 'monotonic_incr': np.array(values, dtype=dtype),
+ 'monotonic_decr': np.array(list(reversed(values)),
+ dtype=dtype),
+ 'non_monotonic': np.array([1, 2, 3] * N, dtype=dtype),
+ }[index_type]
+
+ self.data = engine(lambda: arr, len(arr))
+ # code belows avoids populating the mapping etc. while timing.
+ self.data.get_loc(2)
+
+ def time_get_loc(self, engine, dtype, index_type):
+ self.data.get_loc(2)
+
+
+class ObjectEngineIndexing(object):
+
+ goal_time = 0.2
+ params = [('monotonic_incr', 'monotonic_decr', 'non_monotonic')]
+ param_names = ['index_type']
+
+ def setup(self, index_type):
+ N = 10**5
+ values = list('a' * N + 'b' * N + 'c' * N)
+ arr = {
+ 'monotonic_incr': np.array(values, dtype=object),
+ 'monotonic_decr': np.array(list(reversed(values)), dtype=object),
+ 'non_monotonic': np.array(list('abc') * N, dtype=object),
+ }[index_type]
+
+ self.data = ObjectEngine(lambda: arr, len(arr))
+ # code belows avoids populating the mapping etc. while timing.
+ self.data.get_loc('b')
+
+ def time_get_loc(self, index_type):
+ self.data.get_loc('b')
| This is an offspring from #21699 to do the the ASV tests in a contained PR.
For reference the output from running ``asv run -b indexing_engines`` is:
```
· Creating environments
· Discovering benchmarks
·· Uninstalling from conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt
·· Installing into conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt
· Running 2 total benchmarks (1 commits * 1 environments * 2 benchmarks)
[ 0.00%] · For pandas commit hash b28cf5aa:
[ 0.00%] ·· Building for conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt
[ 0.00%] ·· Benchmarking conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt
[ 50.00%] ··· Running indexing_engines.NumericEngineIndexing.time_get_loc 5.13±0.2μs;...
[100.00%] ··· Running indexing_engines.ObjectEngineIndexing.time_get_loc 4.24±0.08μs;...
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/23090 | 2018-10-11T12:34:13Z | 2018-10-18T16:12:19Z | 2018-10-18T16:12:19Z | 2018-10-18T17:46:22Z |
Fix ASV imports | diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py
index e83efdd0fa2a0..8290731fd7eea 100644
--- a/asv_bench/benchmarks/indexing.py
+++ b/asv_bench/benchmarks/indexing.py
@@ -2,10 +2,9 @@
import numpy as np
import pandas.util.testing as tm
-from pandas import (Series, DataFrame, MultiIndex, Int64Index, Float64Index,
- IntervalIndex, CategoricalIndex,
+from pandas import (Series, DataFrame, Panel, MultiIndex, Int64Index,
+ Float64Index, IntervalIndex, CategoricalIndex,
IndexSlice, concat, date_range)
-from .pandas_vb_common import Panel
class NumericSeriesIndexing(object):
diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py
index 57811dec8cd29..3524a5adb5450 100644
--- a/asv_bench/benchmarks/join_merge.py
+++ b/asv_bench/benchmarks/join_merge.py
@@ -11,8 +11,6 @@
except ImportError:
from pandas import ordered_merge as merge_ordered
-from .pandas_vb_common import Panel
-
class Append(object):
diff --git a/asv_bench/benchmarks/panel_ctor.py b/asv_bench/benchmarks/panel_ctor.py
index b87583ef925f3..c2e5bfa175feb 100644
--- a/asv_bench/benchmarks/panel_ctor.py
+++ b/asv_bench/benchmarks/panel_ctor.py
@@ -3,8 +3,6 @@
from pandas import DataFrame, Panel, DatetimeIndex, date_range
-from .pandas_vb_common import Panel
-
class DifferentIndexes(object):
goal_time = 0.2
diff --git a/asv_bench/benchmarks/panel_methods.py b/asv_bench/benchmarks/panel_methods.py
index e35455f36ed98..542af44a78ffe 100644
--- a/asv_bench/benchmarks/panel_methods.py
+++ b/asv_bench/benchmarks/panel_methods.py
@@ -3,8 +3,6 @@
import numpy as np
from pandas import Panel
-from .pandas_vb_common import Panel
-
class PanelMethods(object):
| #22947 broke the ASV - second time in a few days that linting the benchmarks has broken something (see #22978 / #22886)
@datapythonista, could you please require that PRs try
```
cd asv_bench
asv dev
```
to see if *collection* of benchmark works? This takes about 30 seconds and then the asv run can be aborted with CTRL+C.
| https://api.github.com/repos/pandas-dev/pandas/pulls/23085 | 2018-10-11T05:41:24Z | 2018-10-11T06:19:36Z | 2018-10-11T06:19:36Z | 2018-12-03T06:47:42Z |
PERF: Override PeriodIndex.unique | diff --git a/asv_bench/benchmarks/period.py b/asv_bench/benchmarks/period.py
index c34f9a737473e..29b8c7efda40c 100644
--- a/asv_bench/benchmarks/period.py
+++ b/asv_bench/benchmarks/period.py
@@ -119,3 +119,6 @@ def time_align(self):
def time_intersection(self):
self.index[:750].intersection(self.index[250:])
+
+ def time_unique(self):
+ self.index.unique()
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 40dd48880e0eb..b8607136197e4 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -684,6 +684,7 @@ Performance Improvements
(:issue:`21372`)
- Improved the performance of :func:`pandas.get_dummies` with ``sparse=True`` (:issue:`21997`)
- Improved performance of :func:`IndexEngine.get_indexer_non_unique` for sorted, non-unique indexes (:issue:`9466`)
+- Improved performance of :func:`PeriodIndex.unique` (:issue:`23083`)
.. _whatsnew_0240.docs:
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 7833dd851db34..f151389b02463 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -29,6 +29,7 @@
DIFFERENT_FREQ_INDEX)
from pandas._libs.tslibs import resolution, period
+from pandas.core.algorithms import unique1d
from pandas.core.arrays import datetimelike as dtl
from pandas.core.arrays.period import PeriodArrayMixin, dt64arr_to_periodarr
from pandas.core.base import _shared_docs
@@ -539,6 +540,18 @@ def _get_unique_index(self, dropna=False):
res = res.dropna()
return res
+ @Appender(Index.unique.__doc__)
+ def unique(self, level=None):
+ # override the Index.unique method for performance GH#23083
+ if level is not None:
+ # this should never occur, but is retained to make the signature
+ # match Index.unique
+ self._validate_index_level(level)
+
+ values = self._ndarray_values
+ result = unique1d(values)
+ return self._shallow_copy(result)
+
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
| In trying to simplify the mess that is the PeriodIndex constructors, I found that PeriodIndex.unique is doing an unfortunate conversion to object-dtype. This PR avoids that and gets a nice speedup.
```
In [2]: pi = pd.period_range('1000Q1', periods=10000, freq='Q')
In [3]: %timeit pi.unique()
The slowest run took 6.25 times longer than the fastest. This could mean that an intermediate result is being cached.
1000 loops, best of 3: 226 µs per loop <-- PR
10 loops, best of 3: 24.7 ms per loop <-- master
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/23083 | 2018-10-11T02:15:12Z | 2018-10-11T11:42:37Z | 2018-10-11T11:42:37Z | 2018-10-11T15:32:04Z |
Support ExtensionArray in hash_pandas_object | diff --git a/doc/source/api.rst b/doc/source/api.rst
index ffa240febf731..f57531fffaaaa 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -245,6 +245,15 @@ Top-level evaluation
eval
+Hashing
+~~~~~~~
+
+.. autosummary::
+ :toctree: generated/
+
+ util.hash_array
+ util.hash_pandas_object
+
Testing
~~~~~~~
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 40dd48880e0eb..5e9ce875dddb8 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -548,6 +548,7 @@ update the ``ExtensionDtype._metadata`` tuple to match the signature of your
- :meth:`Series.astype` and :meth:`DataFrame.astype` now dispatch to :meth:`ExtensionArray.astype` (:issue:`21185:`).
- Slicing a single row of a ``DataFrame`` with multiple ExtensionArrays of the same type now preserves the dtype, rather than coercing to object (:issue:`22784`)
- Added :meth:`pandas.api.types.register_extension_dtype` to register an extension type with pandas (:issue:`22664`)
+- Series backed by an ``ExtensionArray`` now work with :func:`util.hash_pandas_object` (:issue:`23066`)
- Updated the ``.type`` attribute for ``PeriodDtype``, ``DatetimeTZDtype``, and ``IntervalDtype`` to be instances of the dtype (``Period``, ``Timestamp``, and ``Interval`` respectively) (:issue:`22938`)
.. _whatsnew_0240.api.incompatibilities:
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index efe587c6aaaad..627afd1b6f860 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -466,6 +466,11 @@ def _values_for_factorize(self):
as NA in the factorization routines, so it will be coded as
`na_sentinal` and not included in `uniques`. By default,
``np.nan`` is used.
+
+ Notes
+ -----
+ The values returned by this method are also used in
+ :func:`pandas.util.hash_pandas_object`.
"""
return self.astype(object), np.nan
diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py
index e62d70847437c..e41885d525653 100644
--- a/pandas/core/util/hashing.py
+++ b/pandas/core/util/hashing.py
@@ -11,7 +11,7 @@
ABCSeries,
ABCDataFrame)
from pandas.core.dtypes.common import (
- is_categorical_dtype, is_list_like)
+ is_categorical_dtype, is_list_like, is_extension_array_dtype)
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.cast import infer_dtype_from_scalar
@@ -265,10 +265,13 @@ def hash_array(vals, encoding='utf8', hash_key=None, categorize=True):
# numpy if categorical is a subdtype of complex, as it will choke).
if is_categorical_dtype(dtype):
return _hash_categorical(vals, encoding, hash_key)
+ elif is_extension_array_dtype(dtype):
+ vals, _ = vals._values_for_factorize()
+ dtype = vals.dtype
# we'll be working with everything as 64-bit values, so handle this
# 128-bit value early
- elif np.issubdtype(dtype, np.complex128):
+ if np.issubdtype(dtype, np.complex128):
return hash_array(vals.real) + 23 * hash_array(vals.imag)
# First, turn whatever array this is into unsigned 64-bit ints, if we can
diff --git a/pandas/tests/extension/base/base.py b/pandas/tests/extension/base/base.py
index beb7948f2c14b..2a4a1b9c4668b 100644
--- a/pandas/tests/extension/base/base.py
+++ b/pandas/tests/extension/base/base.py
@@ -2,6 +2,7 @@
class BaseExtensionTests(object):
+ assert_equal = staticmethod(tm.assert_equal)
assert_series_equal = staticmethod(tm.assert_series_equal)
assert_frame_equal = staticmethod(tm.assert_frame_equal)
assert_extension_array_equal = staticmethod(
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index 4e7886dd2e943..dce91d5a9ca9c 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -164,3 +164,13 @@ def test_container_shift(self, data, frame, periods, indices):
compare = self.assert_series_equal
compare(result, expected)
+
+ @pytest.mark.parametrize("as_frame", [True, False])
+ def test_hash_pandas_object_works(self, data, as_frame):
+ # https://github.com/pandas-dev/pandas/issues/23066
+ data = pd.Series(data)
+ if as_frame:
+ data = data.to_frame()
+ a = pd.util.hash_pandas_object(data)
+ b = pd.util.hash_pandas_object(data)
+ self.assert_equal(a, b)
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 115afdcc99f2b..6c8b12ed865fc 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -199,6 +199,10 @@ def test_combine_le(self, data_repeated):
def test_combine_add(self, data_repeated):
pass
+ @unhashable
+ def test_hash_pandas_object_works(self, data, kind):
+ super().test_hash_pandas_object_works(data, kind)
+
class TestCasting(BaseJSON, base.BaseCastingTests):
@pytest.mark.skip(reason="failing on np.array(self, dtype=str)")
| Closes #23066 | https://api.github.com/repos/pandas-dev/pandas/pulls/23082 | 2018-10-11T02:03:43Z | 2018-10-11T11:41:57Z | 2018-10-11T11:41:57Z | 2018-10-11T11:42:05Z |
STY: avoid backslash | diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py
index a6f586c7f2638..d4e3def84664b 100644
--- a/pandas/compat/numpy/__init__.py
+++ b/pandas/compat/numpy/__init__.py
@@ -60,8 +60,8 @@ def np_array_datetime64_compat(arr, *args, **kwargs):
if not _np_version_under1p11:
# is_list_like
- if hasattr(arr, '__iter__') and not \
- isinstance(arr, string_and_binary_types):
+ if (hasattr(arr, '__iter__') and
+ not isinstance(arr, string_and_binary_types)):
arr = [tz_replacer(s) for s in arr]
else:
arr = tz_replacer(arr)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index e91cc8ec1e996..4607aba070cfc 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -275,8 +275,8 @@ def match(to_match, values, na_sentinel=-1):
# replace but return a numpy array
# use a Series because it handles dtype conversions properly
from pandas import Series
- result = Series(result.ravel()).replace(-1, na_sentinel).values.\
- reshape(result.shape)
+ result = Series(result.ravel()).replace(-1, na_sentinel)
+ result = result.values.reshape(result.shape)
return result
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 00c049497c0d8..91ae8375c233a 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -395,8 +395,8 @@ def nested_renaming_depr(level=4):
elif isinstance(obj, ABCSeries):
nested_renaming_depr()
- elif isinstance(obj, ABCDataFrame) and \
- k not in obj.columns:
+ elif (isinstance(obj, ABCDataFrame) and
+ k not in obj.columns):
raise KeyError(
"Column '{col}' does not exist!".format(col=k))
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index bb82c531b698e..1158a025b1319 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5651,8 +5651,8 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
# fill in 2d chunks
result = {col: s.fillna(method=method, value=value)
for col, s in self.iteritems()}
- new_obj = self._constructor.\
- from_dict(result).__finalize__(self)
+ prelim_obj = self._constructor.from_dict(result)
+ new_obj = prelim_obj.__finalize__(self)
new_data = new_obj._data
else:
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index f15b1203a334e..957f3be8cf6ae 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1027,8 +1027,9 @@ def nunique(self, dropna=True):
try:
sorter = np.lexsort((val, ids))
except TypeError: # catches object dtypes
- assert val.dtype == object, \
- 'val.dtype must be object, got %s' % val.dtype
+ msg = ('val.dtype must be object, got {dtype}'
+ .format(dtype=val.dtype))
+ assert val.dtype == object, msg
val, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((val, ids))
_isna = lambda a: a == -1
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 61dadd833be35..025be781d9ee8 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -578,8 +578,8 @@ def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
- if 'axis' not in kwargs_with_axis or \
- kwargs_with_axis['axis'] is None:
+ if ('axis' not in kwargs_with_axis or
+ kwargs_with_axis['axis'] is None):
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
@@ -1490,8 +1490,10 @@ def nth(self, n, dropna=None):
self._set_group_selection()
if not dropna:
- mask = np.in1d(self._cumcount_array(), nth_values) | \
- np.in1d(self._cumcount_array(ascending=False) + 1, -nth_values)
+ mask_left = np.in1d(self._cumcount_array(), nth_values)
+ mask_right = np.in1d(self._cumcount_array(ascending=False) + 1,
+ -nth_values)
+ mask = mask_left | mask_right
out = self._selected_obj[mask]
if not self.as_index:
@@ -1552,8 +1554,8 @@ def nth(self, n, dropna=None):
result.loc[mask] = np.nan
# reset/reindex to the original groups
- if len(self.obj) == len(dropped) or \
- len(result) == len(self.grouper.result_index):
+ if (len(self.obj) == len(dropped) or
+ len(result) == len(self.grouper.result_index)):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index e7144fb1d2932..1c8fe0e6cadad 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -157,8 +157,8 @@ def _set_grouper(self, obj, sort=False):
if self.key is not None:
key = self.key
# The 'on' is already defined
- if getattr(self.grouper, 'name', None) == key and \
- isinstance(obj, ABCSeries):
+ if (getattr(self.grouper, 'name', None) == key and
+ isinstance(obj, ABCSeries)):
ax = self._grouper.take(obj.index)
else:
if key not in obj._info_axis:
@@ -530,9 +530,9 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
except Exception:
all_in_columns_index = False
- if not any_callable and not all_in_columns_index and \
- not any_arraylike and not any_groupers and \
- match_axis_length and level is None:
+ if (not any_callable and not all_in_columns_index and
+ not any_arraylike and not any_groupers and
+ match_axis_length and level is None):
keys = [com.asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
@@ -593,15 +593,15 @@ def is_in_obj(gpr):
# create the Grouping
# allow us to passing the actual Grouping as the gpr
- ping = Grouping(group_axis,
- gpr,
- obj=obj,
- name=name,
- level=level,
- sort=sort,
- observed=observed,
- in_axis=in_axis) \
- if not isinstance(gpr, Grouping) else gpr
+ ping = (Grouping(group_axis,
+ gpr,
+ obj=obj,
+ name=name,
+ level=level,
+ sort=sort,
+ observed=observed,
+ in_axis=in_axis)
+ if not isinstance(gpr, Grouping) else gpr)
groupings.append(ping)
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index d9f7b4d9c31c3..b199127ac867b 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -521,8 +521,8 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1,
result = result.astype('float64')
result[mask] = np.nan
- if kind == 'aggregate' and \
- self._filter_empty_groups and not counts.all():
+ if (kind == 'aggregate' and
+ self._filter_empty_groups and not counts.all()):
if result.ndim == 2:
try:
result = lib.row_bool_subset(
@@ -743,8 +743,9 @@ def group_info(self):
else:
comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
- return comp_ids.astype('int64', copy=False), \
- obs_group_ids.astype('int64', copy=False), ngroups
+ return (comp_ids.astype('int64', copy=False),
+ obs_group_ids.astype('int64', copy=False),
+ ngroups)
@cache_readonly
def ngroups(self):
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index b3c913f21dd86..0c6aaf4b46d6a 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1836,8 +1836,8 @@ def _get_partial_string_timestamp_match_key(self, key, labels):
"""Translate any partial string timestamp matches in key, returning the
new key (GH 10331)"""
if isinstance(labels, MultiIndex):
- if isinstance(key, compat.string_types) and \
- labels.levels[0].is_all_dates:
+ if (isinstance(key, compat.string_types) and
+ labels.levels[0].is_all_dates):
# Convert key '2016-01-01' to
# ('2016-01-01'[, slice(None, None, None)]+)
key = tuple([key] + [slice(None)] * (len(labels.levels) - 1))
@@ -1847,8 +1847,8 @@ def _get_partial_string_timestamp_match_key(self, key, labels):
# (..., slice('2016-01-01', '2016-01-01', None), ...)
new_key = []
for i, component in enumerate(key):
- if isinstance(component, compat.string_types) and \
- labels.levels[i].is_all_dates:
+ if (isinstance(component, compat.string_types) and
+ labels.levels[i].is_all_dates):
new_key.append(slice(component, component, None))
else:
new_key.append(component)
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 6d67070000dcd..1fc9d961285be 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -184,8 +184,8 @@ def get_reindexed_values(self, empty_dtype, upcasted_na):
if len(values) and values[0] is None:
fill_value = None
- if getattr(self.block, 'is_datetimetz', False) or \
- is_datetimetz(empty_dtype):
+ if (getattr(self.block, 'is_datetimetz', False) or
+ is_datetimetz(empty_dtype)):
if self.block is None:
array = empty_dtype.construct_array_type()
missing_arr = array([fill_value], dtype=empty_dtype)
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index b525dddeb1ba5..22e591e776a22 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -140,8 +140,8 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
margins_name=margins_name, fill_value=fill_value)
# discard the top level
- if values_passed and not values_multi and not table.empty and \
- (table.columns.nlevels > 1):
+ if (values_passed and not values_multi and not table.empty and
+ (table.columns.nlevels > 1)):
table = table[values[0]]
if len(index) == 0 and len(columns) > 0:
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 50f6e310705d7..495e59d0882de 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -745,9 +745,8 @@ def check_len(item, name):
if is_list_like(item):
if not len(item) == data_to_encode.shape[1]:
- len_msg = \
- len_msg.format(name=name, len_item=len(item),
- len_enc=data_to_encode.shape[1])
+ len_msg = len_msg.format(name=name, len_item=len(item),
+ len_enc=data_to_encode.shape[1])
raise ValueError(len_msg)
check_len(prefix, 'prefix')
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index eb8d2b0b6c809..dcba51d26980f 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -724,8 +724,9 @@ def calc_with_mask(carg, mask):
result = np.empty(carg.shape, dtype='M8[ns]')
iresult = result.view('i8')
iresult[~mask] = tslibs.iNaT
- result[mask] = calc(carg[mask].astype(np.float64).astype(np.int64)). \
- astype('M8[ns]')
+
+ masked_result = calc(carg[mask].astype(np.float64).astype(np.int64))
+ result[mask] = masked_result.astype('M8[ns]')
return result
# try intlike / strings that are ints
diff --git a/pandas/core/window.py b/pandas/core/window.py
index ea0ec79d655fb..7d48967602bc1 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -98,11 +98,11 @@ def is_freq_type(self):
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
- if self.min_periods is not None and not \
- is_integer(self.min_periods):
+ if (self.min_periods is not None and
+ not is_integer(self.min_periods)):
raise ValueError("min_periods must be an integer")
- if self.closed is not None and self.closed not in \
- ['right', 'both', 'left', 'neither']:
+ if (self.closed is not None and
+ self.closed not in ['right', 'both', 'left', 'neither']):
raise ValueError("closed must be 'right', 'left', 'both' or "
"'neither'")
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 405911eda7e9e..9bf7c5af2cd3a 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -430,8 +430,8 @@ def _get_handle(path_or_buf, mode, encoding=None, compression=None,
handles.append(f)
# in Python 3, convert BytesIO or fileobjects passed with an encoding
- if compat.PY3 and is_text and\
- (compression or isinstance(f, need_text_wrapping)):
+ if (compat.PY3 and is_text and
+ (compression or isinstance(f, need_text_wrapping))):
from io import TextIOWrapper
f = TextIOWrapper(f, encoding=encoding)
handles.append(f)
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 00b4c704c681b..c1cbccb7cbf1c 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -1755,14 +1755,14 @@ def convert(cls, style_dict, num_format_str=None):
props[k] = ['none', 'thin', 'medium', 'dashed', 'dotted',
'thick', 'double', 'hair', 'mediumDashed',
'dashDot', 'mediumDashDot', 'dashDotDot',
- 'mediumDashDotDot', 'slantDashDot'].\
- index(props[k])
+ 'mediumDashDotDot',
+ 'slantDashDot'].index(props[k])
except ValueError:
props[k] = 2
if isinstance(props.get('font_script'), string_types):
- props['font_script'] = ['baseline', 'superscript', 'subscript'].\
- index(props['font_script'])
+ props['font_script'] = ['baseline', 'superscript',
+ 'subscript'].index(props['font_script'])
if isinstance(props.get('underline'), string_types):
props['underline'] = {'none': 0, 'single': 1, 'double': 2,
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index f4bb53ba4f218..ad6ad5bcaf309 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -14,10 +14,9 @@
PackageLoader, Environment, ChoiceLoader, FileSystemLoader
)
except ImportError:
- msg = "pandas.Styler requires jinja2. "\
- "Please install with `conda install Jinja2`\n"\
- "or `pip install Jinja2`"
- raise ImportError(msg)
+ raise ImportError("pandas.Styler requires jinja2. "
+ "Please install with `conda install Jinja2`\n"
+ "or `pip install Jinja2`")
from pandas.core.dtypes.common import is_float, is_string_like
diff --git a/pandas/io/formats/terminal.py b/pandas/io/formats/terminal.py
index 2846525adbe6b..ac73363b92b1e 100644
--- a/pandas/io/formats/terminal.py
+++ b/pandas/io/formats/terminal.py
@@ -40,9 +40,8 @@ def get_terminal_size():
if tuple_xy is None:
tuple_xy = _get_terminal_size_tput()
# needed for window's python in cygwin's xterm!
- if current_os == 'Linux' or \
- current_os == 'Darwin' or \
- current_os.startswith('CYGWIN'):
+ if (current_os == 'Linux' or current_os == 'Darwin' or
+ current_os.startswith('CYGWIN')):
tuple_xy = _get_terminal_size_linux()
if tuple_xy is None:
tuple_xy = (80, 25) # default value
diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py
index 03f0905d2023a..5c7b964cf69d1 100644
--- a/pandas/io/json/normalize.py
+++ b/pandas/io/json/normalize.py
@@ -250,11 +250,10 @@ def _recursive_extract(data, path, seen_meta, level=0):
if errors == 'ignore':
meta_val = np.nan
else:
- raise \
- KeyError("Try running with "
- "errors='ignore' as key "
- "{err} is not always present"
- .format(err=e))
+ raise KeyError("Try running with "
+ "errors='ignore' as key "
+ "{err} is not always present"
+ .format(err=e))
meta_vals[key].append(meta_val)
records.extend(recs)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 2def3b81c9518..1edc6f6e14442 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -883,15 +883,15 @@ def _clean_options(self, options, engine):
# C engine not supported yet
if engine == 'c':
if options['skipfooter'] > 0:
- fallback_reason = "the 'c' engine does not support"\
- " skipfooter"
+ fallback_reason = ("the 'c' engine does not support"
+ " skipfooter")
engine = 'python'
encoding = sys.getfilesystemencoding() or 'utf-8'
if sep is None and not delim_whitespace:
if engine == 'c':
- fallback_reason = "the 'c' engine does not support"\
- " sep=None with delim_whitespace=False"
+ fallback_reason = ("the 'c' engine does not support"
+ " sep=None with delim_whitespace=False")
engine = 'python'
elif sep is not None and len(sep) > 1:
if engine == 'c' and sep == r'\s+':
@@ -899,10 +899,10 @@ def _clean_options(self, options, engine):
del result['delimiter']
elif engine not in ('python', 'python-fwf'):
# wait until regex engine integrated
- fallback_reason = "the 'c' engine does not support"\
- " regex separators (separators > 1 char and"\
- r" different from '\s+' are"\
- " interpreted as regex)"
+ fallback_reason = ("the 'c' engine does not support"
+ " regex separators (separators > 1 char and"
+ r" different from '\s+' are"
+ " interpreted as regex)")
engine = 'python'
elif delim_whitespace:
if 'python' in engine:
@@ -915,10 +915,10 @@ def _clean_options(self, options, engine):
except UnicodeDecodeError:
encodeable = False
if not encodeable and engine not in ('python', 'python-fwf'):
- fallback_reason = "the separator encoded in {encoding}" \
- " is > 1 char long, and the 'c' engine" \
- " does not support such separators".format(
- encoding=encoding)
+ fallback_reason = ("the separator encoded in {encoding}"
+ " is > 1 char long, and the 'c' engine"
+ " does not support such separators"
+ .format(encoding=encoding))
engine = 'python'
quotechar = options['quotechar']
@@ -3203,8 +3203,8 @@ def _clean_index_names(columns, index_col):
index_names.append(name)
# hack
- if isinstance(index_names[0], compat.string_types)\
- and 'Unnamed' in index_names[0]:
+ if (isinstance(index_names[0], compat.string_types) and
+ 'Unnamed' in index_names[0]):
index_names[0] = None
return index_names, columns, index_col
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index ff37036533b4f..d2b523461104c 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1804,8 +1804,8 @@ def validate_metadata(self, handler):
if self.meta == 'category':
new_metadata = self.metadata
cur_metadata = handler.read_metadata(self.cname)
- if new_metadata is not None and cur_metadata is not None \
- and not array_equivalent(new_metadata, cur_metadata):
+ if (new_metadata is not None and cur_metadata is not None and
+ not array_equivalent(new_metadata, cur_metadata)):
raise ValueError("cannot append a categorical with "
"different categories to the existing")
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index a321e315f5225..68b2182c2ff07 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -442,8 +442,8 @@ def parse_dates_safe(dates, delta=False, year=False, days=False):
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
- conv_dates = 2 * (d.year - stata_epoch.year) + \
- (d.month > 6).astype(np.int)
+ conv_dates = (2 * (d.year - stata_epoch.year) +
+ (d.month > 6).astype(np.int))
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
@@ -568,16 +568,18 @@ def _cast_to_stata_types(data):
elif dtype in (np.float32, np.float64):
value = data[col].max()
if np.isinf(value):
- msg = 'Column {0} has a maximum value of infinity which is ' \
- 'outside the range supported by Stata.'
- raise ValueError(msg.format(col))
+ raise ValueError('Column {col} has a maximum value of '
+ 'infinity which is outside the range '
+ 'supported by Stata.'.format(col=col))
if dtype == np.float32 and value > float32_max:
data[col] = data[col].astype(np.float64)
elif dtype == np.float64:
if value > float64_max:
- msg = 'Column {0} has a maximum value ({1}) outside the ' \
- 'range supported by Stata ({1})'
- raise ValueError(msg.format(col, value, float64_max))
+ raise ValueError('Column {col} has a maximum value '
+ '({val}) outside the range supported by '
+ 'Stata ({float64_max})'
+ .format(col=col, val=value,
+ float64_max=float64_max))
if ws:
warnings.warn(ws, PossiblePrecisionLoss)
@@ -1704,9 +1706,10 @@ def _do_convert_categoricals(self, data, value_label_dict, lbllist,
vc = Series(categories).value_counts()
repeats = list(vc.index[vc > 1])
repeats = '\n' + '-' * 80 + '\n'.join(repeats)
- msg = 'Value labels for column {0} are not unique. The ' \
- 'repeated labels are:\n{1}'.format(col, repeats)
- raise ValueError(msg)
+ raise ValueError('Value labels for column {col} are not '
+ 'unique. The repeated labels are:\n'
+ '{repeats}'
+ .format(col=col, repeats=repeats))
# TODO: is the next line needed above in the data(...) method?
cat_data = Series(cat_data, index=data.index)
cat_converted_data.append((col, cat_data))
@@ -2066,8 +2069,8 @@ def _check_column_names(self, data):
name = text_type(name)
for c in name:
- if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and \
- (c < '0' or c > '9') and c != '_':
+ if ((c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and
+ (c < '0' or c > '9') and c != '_'):
name = name.replace(c, '_')
# Variable name must not be a reserved word
diff --git a/pandas/plotting/__init__.py b/pandas/plotting/__init__.py
index 385d4d7f047c7..ff5351bb6c6ea 100644
--- a/pandas/plotting/__init__.py
+++ b/pandas/plotting/__init__.py
@@ -12,9 +12,9 @@
from pandas.plotting._style import plot_params
from pandas.plotting._tools import table
try:
- from pandas.plotting._converter import \
- register as register_matplotlib_converters
- from pandas.plotting._converter import \
- deregister as deregister_matplotlib_converters
+ from pandas.plotting._converter import (
+ register as register_matplotlib_converters)
+ from pandas.plotting._converter import (
+ deregister as deregister_matplotlib_converters)
except ImportError:
pass
| Related: #11954.
This gets most of the non-`textwrap.dedent` cases. | https://api.github.com/repos/pandas-dev/pandas/pulls/23073 | 2018-10-10T14:19:39Z | 2018-10-12T13:12:16Z | 2018-10-12T13:12:16Z | 2018-10-12T15:44:17Z |
Fixing memory leaks in read_csv | diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index bb02bbb36424a..b9abf9293079f 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1382,6 +1382,7 @@ Notice how we now instead output ``np.nan`` itself instead of a stringified form
- Bug in :func:`DataFrame.to_string()` that caused representations of :class:`DataFrame` to not take up the whole window (:issue:`22984`)
- Bug in :func:`DataFrame.to_csv` where a single level MultiIndex incorrectly wrote a tuple. Now just the value of the index is written (:issue:`19589`).
- Bug in :meth:`HDFStore.append` when appending a :class:`DataFrame` with an empty string column and ``min_itemsize`` < 8 (:issue:`12242`)
+- Bug in :func:`read_csv()` in which memory leaks occurred in the C engine when parsing ``NaN`` values due to insufficient cleanup on completion or error (:issue:`21353`)
- Bug in :func:`read_csv()` in which incorrect error messages were being raised when ``skipfooter`` was passed in along with ``nrows``, ``iterator``, or ``chunksize`` (:issue:`23711`)
- Bug in :meth:`read_csv()` in which :class:`MultiIndex` index names were being improperly handled in the cases when they were not provided (:issue:`23484`)
- Bug in :meth:`read_html()` in which the error message was not displaying the valid flavors when an invalid one was provided (:issue:`23549`)
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 40aa03caa56eb..f74de79542628 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -1070,18 +1070,6 @@ cdef class TextReader:
conv = self._get_converter(i, name)
- # XXX
- na_flist = set()
- if self.na_filter:
- na_list, na_flist = self._get_na_list(i, name)
- if na_list is None:
- na_filter = 0
- else:
- na_filter = 1
- na_hashset = kset_from_list(na_list)
- else:
- na_filter = 0
-
col_dtype = None
if self.dtype is not None:
if isinstance(self.dtype, dict):
@@ -1106,13 +1094,34 @@ cdef class TextReader:
self.c_encoding)
continue
- # Should return as the desired dtype (inferred or specified)
- col_res, na_count = self._convert_tokens(
- i, start, end, name, na_filter, na_hashset,
- na_flist, col_dtype)
+ # Collect the list of NaN values associated with the column.
+ # If we aren't supposed to do that, or none are collected,
+ # we set `na_filter` to `0` (`1` otherwise).
+ na_flist = set()
+
+ if self.na_filter:
+ na_list, na_flist = self._get_na_list(i, name)
+ if na_list is None:
+ na_filter = 0
+ else:
+ na_filter = 1
+ na_hashset = kset_from_list(na_list)
+ else:
+ na_filter = 0
- if na_filter:
- self._free_na_set(na_hashset)
+ # Attempt to parse tokens and infer dtype of the column.
+ # Should return as the desired dtype (inferred or specified).
+ try:
+ col_res, na_count = self._convert_tokens(
+ i, start, end, name, na_filter, na_hashset,
+ na_flist, col_dtype)
+ finally:
+ # gh-21353
+ #
+ # Cleanup the NaN hash that we generated
+ # to avoid memory leaks.
+ if na_filter:
+ self._free_na_set(na_hashset)
if upcast_na and na_count > 0:
col_res = _maybe_upcast(col_res)
@@ -2059,6 +2068,7 @@ cdef kh_str_t* kset_from_list(list values) except NULL:
# None creeps in sometimes, which isn't possible here
if not isinstance(val, bytes):
+ kh_destroy_str(table)
raise ValueError('Must be all encoded bytes')
k = kh_put_str(table, PyBytes_AsString(val), &ret)
| This PR fixes a memory leak in parsers.pyx detected by valgrind, and also adds some further cleanup that should avoid memory leaks on exceptions,
closes #21353
- Moved the allocation of na_hashset further down, closer to where it is used. Otherwise it will not be freed if `continue` is executed,
- Delete `na_hashset` if there is an exception,
- Also clean up the allocation inside `kset_from_list` before raising an exception. | https://api.github.com/repos/pandas-dev/pandas/pulls/23072 | 2018-10-10T08:33:17Z | 2018-11-19T12:09:41Z | 2018-11-19T12:09:40Z | 2020-07-23T13:45:25Z |
Corrected 'columns' argument of 'to_csv' method | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 039cba2993381..56da4dbea8706 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1603,7 +1603,7 @@ function takes a number of arguments. Only the first is required.
* ``sep`` : Field delimiter for the output file (default ",")
* ``na_rep``: A string representation of a missing value (default '')
* ``float_format``: Format string for floating point numbers
-* ``cols``: Columns to write (default None)
+* ``columns``: Columns to write (default None)
* ``header``: Whether to write out the column names (default True)
* ``index``: whether to write row (index) names (default True)
* ``index_label``: Column label(s) for index column(s) if desired. If None
| Compared the 'to_csv' method described in https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_csv.html with the same method described in https://pandas.pydata.org/pandas-docs/stable/io.html#writing-to-csv-format and noticed difference between the two in the 'columns' and 'cols' argument.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/23068 | 2018-10-10T02:46:23Z | 2018-10-10T16:46:32Z | 2018-10-10T16:46:32Z | 2018-10-10T16:46:44Z |
Add allow_sets-kwarg to is_list_like | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 16f0b9ee99909..d786711ffa6ea 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -198,6 +198,8 @@ Other Enhancements
- :meth:`round`, :meth:`ceil`, and meth:`floor` for :class:`DatetimeIndex` and :class:`Timestamp` now support an ``ambiguous`` argument for handling datetimes that are rounded to ambiguous times (:issue:`18946`)
- :class:`Resampler` now is iterable like :class:`GroupBy` (:issue:`15314`).
- :meth:`Series.resample` and :meth:`DataFrame.resample` have gained the :meth:`Resampler.quantile` (:issue:`15023`).
+- :meth:`pandas.core.dtypes.is_list_like` has gained a keyword ``allow_sets`` which is ``True`` by default; if ``False``,
+ all instances of ``set`` will not be considered "list-like" anymore (:issue:`23061`)
- :meth:`Index.to_frame` now supports overriding column name(s) (:issue:`22580`).
- New attribute :attr:`__git_version__` will return git commit sha of current build (:issue:`21295`).
- Compatibility with Matplotlib 3.0 (:issue:`22790`).
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 1453725225e7d..5108e23c53b5a 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -141,6 +141,7 @@ def lfilter(*args, **kwargs):
Mapping = collections.abc.Mapping
Sequence = collections.abc.Sequence
Sized = collections.abc.Sized
+ Set = collections.abc.Set
else:
# Python 2
@@ -201,6 +202,7 @@ def get_range_parameters(data):
Mapping = collections.Mapping
Sequence = collections.Sequence
Sized = collections.Sized
+ Set = collections.Set
if PY2:
def iteritems(obj, **kw):
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 22da546355df6..af5e1523c7cec 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -16,10 +16,10 @@
ABCSparseArray, ABCSparseSeries, ABCCategoricalIndex, ABCIndexClass,
ABCDateOffset)
from pandas.core.dtypes.inference import ( # noqa:F401
- is_bool, is_integer, is_hashable, is_iterator, is_float,
- is_dict_like, is_scalar, is_string_like, is_list_like, is_number,
- is_file_like, is_re, is_re_compilable, is_sequence, is_nested_list_like,
- is_named_tuple, is_array_like, is_decimal, is_complex, is_interval)
+ is_bool, is_integer, is_float, is_number, is_decimal, is_complex,
+ is_re, is_re_compilable, is_dict_like, is_string_like, is_file_like,
+ is_list_like, is_nested_list_like, is_sequence, is_named_tuple,
+ is_hashable, is_iterator, is_array_like, is_scalar, is_interval)
_POSSIBLY_CAST_DTYPES = {np.dtype(t).name
for t in ['O', 'int8', 'uint8', 'int16', 'uint16',
diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py
index 67f391615eedb..7470497383064 100644
--- a/pandas/core/dtypes/inference.py
+++ b/pandas/core/dtypes/inference.py
@@ -5,7 +5,7 @@
from numbers import Number
from pandas import compat
from pandas.compat import (PY2, string_types, text_type,
- string_and_binary_types, re_type)
+ string_and_binary_types, re_type, Set)
from pandas._libs import lib
is_bool = lib.is_bool
@@ -247,7 +247,7 @@ def is_re_compilable(obj):
return True
-def is_list_like(obj):
+def is_list_like(obj, allow_sets=True):
"""
Check if the object is list-like.
@@ -259,6 +259,10 @@ def is_list_like(obj):
Parameters
----------
obj : The object to check.
+ allow_sets : boolean, default True
+ If this parameter is False, sets will not be considered list-like
+
+ .. versionadded:: 0.24.0
Returns
-------
@@ -283,11 +287,15 @@ def is_list_like(obj):
False
"""
- return (isinstance(obj, compat.Iterable) and
+ return (isinstance(obj, compat.Iterable)
# we do not count strings/unicode/bytes as list-like
- not isinstance(obj, string_and_binary_types) and
+ and not isinstance(obj, string_and_binary_types)
+
# exclude zero-dimensional numpy arrays, effectively scalars
- not (isinstance(obj, np.ndarray) and obj.ndim == 0))
+ and not (isinstance(obj, np.ndarray) and obj.ndim == 0)
+
+ # exclude sets if allow_sets is False
+ and not (allow_sets is False and isinstance(obj, Set)))
def is_array_like(obj):
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 76cd6aabb93ae..d0dd03d6eb8df 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -47,6 +47,70 @@ def coerce(request):
return request.param
+# collect all objects to be tested for list-like-ness; use tuples of objects,
+# whether they are list-like or not (special casing for sets), and their ID
+ll_params = [
+ ([1], True, 'list'), # noqa: E241
+ ([], True, 'list-empty'), # noqa: E241
+ ((1, ), True, 'tuple'), # noqa: E241
+ (tuple(), True, 'tuple-empty'), # noqa: E241
+ ({'a': 1}, True, 'dict'), # noqa: E241
+ (dict(), True, 'dict-empty'), # noqa: E241
+ ({'a', 1}, 'set', 'set'), # noqa: E241
+ (set(), 'set', 'set-empty'), # noqa: E241
+ (frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
+ (frozenset([]), 'set', 'frozenset-empty'), # noqa: E241
+ (iter([1, 2]), True, 'iterator'), # noqa: E241
+ (iter([]), True, 'iterator-empty'), # noqa: E241
+ ((x for x in [1, 2]), True, 'generator'), # noqa: E241
+ ((x for x in []), True, 'generator-empty'), # noqa: E241
+ (Series([1]), True, 'Series'), # noqa: E241
+ (Series([]), True, 'Series-empty'), # noqa: E241
+ (Series(['a']).str, True, 'StringMethods'), # noqa: E241
+ (Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
+ (Index([1]), True, 'Index'), # noqa: E241
+ (Index([]), True, 'Index-empty'), # noqa: E241
+ (DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
+ (DataFrame(), True, 'DataFrame-empty'), # noqa: E241
+ (np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
+ (np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
+ (np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
+ (np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
+ (np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
+ (np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
+ (np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
+ (np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
+ (np.array(2), False, 'ndarray-0d'), # noqa: E241
+ (1, False, 'int'), # noqa: E241
+ (b'123', False, 'bytes'), # noqa: E241
+ (b'', False, 'bytes-empty'), # noqa: E241
+ ('123', False, 'string'), # noqa: E241
+ ('', False, 'string-empty'), # noqa: E241
+ (str, False, 'string-type'), # noqa: E241
+ (object(), False, 'object'), # noqa: E241
+ (np.nan, False, 'NaN'), # noqa: E241
+ (None, False, 'None') # noqa: E241
+]
+objs, expected, ids = zip(*ll_params)
+
+
+@pytest.fixture(params=zip(objs, expected), ids=ids)
+def maybe_list_like(request):
+ return request.param
+
+
+def test_is_list_like(maybe_list_like):
+ obj, expected = maybe_list_like
+ expected = True if expected == 'set' else expected
+ assert inference.is_list_like(obj) == expected
+
+
+def test_is_list_like_disallow_sets(maybe_list_like):
+ obj, expected = maybe_list_like
+ expected = False if expected == 'set' else expected
+ assert inference.is_list_like(obj, allow_sets=False) == expected
+
+
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
@@ -63,23 +127,6 @@ def __getitem__(self):
assert (not is_seq(A()))
-@pytest.mark.parametrize(
- "ll",
- [
- [], [1], (1, ), (1, 2), {'a': 1},
- {1, 'a'}, Series([1]),
- Series([]), Series(['a']).str,
- np.array([2])])
-def test_is_list_like_passes(ll):
- assert inference.is_list_like(ll)
-
-
-@pytest.mark.parametrize(
- "ll", [1, '2', object(), str, np.array(2)])
-def test_is_list_like_fails(ll):
- assert not inference.is_list_like(ll)
-
-
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
| - [x] closes ~~#23009~~ #23061
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This is an attempt responding to https://github.com/pandas-dev/pandas/pull/22486#issuecomment-428159769:
> @h-vetinari why don't you try (separate PR) excluding set from is_list_like and see what the implications of that are.
Following some initial discussion in #23061, I decided to go with a variant that does not break anything - i.e. adding a keyword which defaults to the current behaviour. I've added a warning that's only raised if necessary, to note that this behaviour will be changed in the future -- regardless of whether it is deprecated or not, I think that users *as well as developers* should have to actively choose to include unordered sets (reason i.a. for #23009, and probably some more).
The tedious part of this PR was hunting down all the internal uses of `is_list_like` and adding the kwarg there to avoid raising the warning. Hope I didn't miss any.
| https://api.github.com/repos/pandas-dev/pandas/pulls/23065 | 2018-10-09T21:37:23Z | 2018-10-18T15:53:33Z | 2018-10-18T15:53:33Z | 2018-10-22T14:16:59Z |
DEPS: drop numpy < 1.12 | diff --git a/ci/azure-macos-35.yaml b/ci/azure-macos-35.yaml
index a36f748ded812..6ccdc79d11b27 100644
--- a/ci/azure-macos-35.yaml
+++ b/ci/azure-macos-35.yaml
@@ -8,10 +8,10 @@ dependencies:
- html5lib
- jinja2
- lxml
- - matplotlib
+ - matplotlib=2.2.0
- nomkl
- numexpr
- - numpy=1.10.4
+ - numpy=1.12.0
- openpyxl=2.5.5
- pytables
- python=3.5*
diff --git a/ci/azure-windows-27.yaml b/ci/azure-windows-27.yaml
index bcd9ddee1715e..d48a9ba986a93 100644
--- a/ci/azure-windows-27.yaml
+++ b/ci/azure-windows-27.yaml
@@ -10,7 +10,7 @@ dependencies:
- html5lib
- jinja2=2.8
- lxml
- - matplotlib
+ - matplotlib=2.0.1
- numexpr
- numpy=1.12*
- openpyxl=2.5.5
diff --git a/ci/azure/macos.yml b/ci/azure/macos.yml
index 9bfaef04ea2fa..fb10d89731f26 100644
--- a/ci/azure/macos.yml
+++ b/ci/azure/macos.yml
@@ -9,7 +9,7 @@ jobs:
strategy:
maxParallel: 11
matrix:
- py35_np_110:
+ py35_np_120:
ENV_FILE: ci/azure-macos-35.yaml
CONDA_PY: "35"
CONDA_ENV: pandas
diff --git a/ci/azure/windows-py27.yml b/ci/azure/windows-py27.yml
index 10251bc03b8dc..8718cc849b7a8 100644
--- a/ci/azure/windows-py27.yml
+++ b/ci/azure/windows-py27.yml
@@ -9,7 +9,7 @@ jobs:
strategy:
maxParallel: 11
matrix:
- py36_np14:
+ py36_np121:
ENV_FILE: ci/azure-windows-27.yaml
CONDA_PY: "27"
CONDA_ENV: pandas
diff --git a/ci/circle-27-compat.yaml b/ci/circle-27-compat.yaml
index 84ec7e20fc8f1..5b726304cf414 100644
--- a/ci/circle-27-compat.yaml
+++ b/ci/circle-27-compat.yaml
@@ -3,18 +3,18 @@ channels:
- defaults
- conda-forge
dependencies:
- - bottleneck=1.0.0
+ - bottleneck=1.2.0
- cython=0.28.2
- jinja2=2.8
- - numexpr=2.4.4 # we test that we correctly don't use an unsupported numexpr
- - numpy=1.9.3
+ - numexpr=2.6.1
+ - numpy=1.12.0
- openpyxl=2.5.5
- psycopg2
- - pytables=3.2.2
+ - pytables=3.4.2
- python-dateutil=2.5.0
- python=2.7*
- pytz=2013b
- - scipy=0.14.0
+ - scipy=0.18.1
- sqlalchemy=0.7.8
- xlrd=0.9.2
- xlsxwriter=0.5.2
diff --git a/ci/requirements-optional-conda.txt b/ci/requirements-optional-conda.txt
index 376fdb1e14e3a..e9afd7a551b6e 100644
--- a/ci/requirements-optional-conda.txt
+++ b/ci/requirements-optional-conda.txt
@@ -1,6 +1,6 @@
beautifulsoup4>=4.2.1
blosc
-bottleneck
+bottleneck>=1.2.0
fastparquet
feather-format
gcsfs
@@ -9,17 +9,17 @@ ipython>=5.6.0
ipykernel
jinja2
lxml
-matplotlib
+matplotlib>=2.0.0
nbsphinx
-numexpr
+numexpr>=2.6.1
openpyxl=2.5.5
pyarrow
pymysql
-pytables
+pytables>=3.4.2
pytest-cov
pytest-xdist
s3fs
-scipy
+scipy>=0.18.1
seaborn
sqlalchemy
statsmodels
diff --git a/ci/requirements-optional-pip.txt b/ci/requirements-optional-pip.txt
index 09ce8e59a3b46..ebe0c4ca88ee6 100644
--- a/ci/requirements-optional-pip.txt
+++ b/ci/requirements-optional-pip.txt
@@ -2,7 +2,7 @@
# Do not modify directly
beautifulsoup4>=4.2.1
blosc
-bottleneck
+bottleneck>=1.2.0
fastparquet
feather-format
gcsfs
@@ -11,9 +11,9 @@ ipython>=5.6.0
ipykernel
jinja2
lxml
-matplotlib
+matplotlib>=2.0.0
nbsphinx
-numexpr
+numexpr>=2.6.1
openpyxl==2.5.5
pyarrow
pymysql
@@ -21,7 +21,7 @@ tables
pytest-cov
pytest-xdist
s3fs
-scipy
+scipy>=0.18.1
seaborn
sqlalchemy
statsmodels
diff --git a/ci/travis-27-locale.yaml b/ci/travis-27-locale.yaml
index aca65f27d4187..dc5580ae6d287 100644
--- a/ci/travis-27-locale.yaml
+++ b/ci/travis-27-locale.yaml
@@ -3,11 +3,11 @@ channels:
- defaults
- conda-forge
dependencies:
- - bottleneck=1.0.0
+ - bottleneck=1.2.0
- cython=0.28.2
- lxml
- - matplotlib=1.4.3
- - numpy=1.9.3
+ - matplotlib=2.0.0
+ - numpy=1.12.0
- openpyxl=2.4.0
- python-dateutil
- python-blosc
diff --git a/ci/travis-27.yaml b/ci/travis-27.yaml
index cc0c5a3192188..f079ac309b97c 100644
--- a/ci/travis-27.yaml
+++ b/ci/travis-27.yaml
@@ -14,7 +14,7 @@ dependencies:
- jemalloc=4.5.0.post
- jinja2=2.8
- lxml
- - matplotlib
+ - matplotlib=2.2.2
- mock
- nomkl
- numexpr
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 7a846c817aee2..843384b680cf8 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -225,7 +225,7 @@ Dependencies
------------
* `setuptools <https://setuptools.readthedocs.io/en/latest/>`__: 24.2.0 or higher
-* `NumPy <http://www.numpy.org>`__: 1.9.0 or higher
+* `NumPy <http://www.numpy.org>`__: 1.12.0 or higher
* `python-dateutil <https://dateutil.readthedocs.io/en/stable/>`__: 2.5.0 or higher
* `pytz <http://pytz.sourceforge.net/>`__
@@ -236,11 +236,11 @@ Recommended Dependencies
* `numexpr <https://github.com/pydata/numexpr>`__: for accelerating certain numerical operations.
``numexpr`` uses multiple cores as well as smart chunking and caching to achieve large speedups.
- If installed, must be Version 2.4.6 or higher.
+ If installed, must be Version 2.6.1 or higher.
* `bottleneck <https://github.com/kwgoodman/bottleneck>`__: for accelerating certain types of ``nan``
evaluations. ``bottleneck`` uses specialized cython routines to achieve large speedups. If installed,
- must be Version 1.0.0 or higher.
+ must be Version 1.2.0 or higher.
.. note::
@@ -255,9 +255,9 @@ Optional Dependencies
* `Cython <http://www.cython.org>`__: Only necessary to build development
version. Version 0.28.2 or higher.
-* `SciPy <http://www.scipy.org>`__: miscellaneous statistical functions, Version 0.14.0 or higher
+* `SciPy <http://www.scipy.org>`__: miscellaneous statistical functions, Version 0.18.1 or higher
* `xarray <http://xarray.pydata.org>`__: pandas like handling for > 2 dims, needed for converting Panels to xarray objects. Version 0.7.0 or higher is recommended.
-* `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage. Version 3.0.0 or higher required, Version 3.2.1 or higher highly recommended.
+* `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage, Version 3.4.2 or higher
* `Feather Format <https://github.com/wesm/feather>`__: necessary for feather-based storage, version 0.3.1 or higher.
* `Apache Parquet <https://parquet.apache.org/>`__, either `pyarrow <http://arrow.apache.org/docs/python/>`__ (>= 0.4.1) or `fastparquet <https://fastparquet.readthedocs.io/en/latest>`__ (>= 0.0.6) for parquet-based storage. The `snappy <https://pypi.org/project/python-snappy>`__ and `brotli <https://pypi.org/project/brotlipy>`__ are available for compression support.
* `SQLAlchemy <http://www.sqlalchemy.org>`__: for SQL database support. Version 0.8.1 or higher recommended. Besides SQLAlchemy, you also need a database specific driver. You can find an overview of supported drivers for each SQL dialect in the `SQLAlchemy docs <http://docs.sqlalchemy.org/en/latest/dialects/index.html>`__. Some common drivers are:
@@ -266,7 +266,7 @@ Optional Dependencies
* `pymysql <https://github.com/PyMySQL/PyMySQL>`__: for MySQL.
* `SQLite <https://docs.python.org/3/library/sqlite3.html>`__: for SQLite, this is included in Python's standard library by default.
-* `matplotlib <http://matplotlib.org/>`__: for plotting, Version 1.4.3 or higher.
+* `matplotlib <http://matplotlib.org/>`__: for plotting, Version 2.0.0 or higher.
* For Excel I/O:
* `xlrd/xlwt <http://www.python-excel.org/>`__: Excel reading (xlrd) and writing (xlwt)
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 8a7ff4be78a8a..3053625721560 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -206,8 +206,32 @@ Other Enhancements
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
- A newly constructed empty :class:`DataFrame` with integer as the ``dtype`` will now only be cast to ``float64`` if ``index`` is specified (:issue:`22858`)
+.. _whatsnew_0240.api_breaking.deps:
+
+Dependencies have increased minimum versions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We have updated our minimum supported versions of dependencies (:issue:`21242`).
+If installed, we now require:
+
++-----------------+-----------------+----------+
+| Package | Minimum Version | Required |
++=================+=================+==========+
+| numpy | 1.12.0 | X |
++-----------------+-----------------+----------+
+| bottleneck | 1.2.0 | |
++-----------------+-----------------+----------+
+| matplotlib | 2.0.0 | |
++-----------------+-----------------+----------+
+| numexpr | 2.6.1 | |
++-----------------+-----------------+----------+
+| pytables | 3.4.2 | |
++-----------------+-----------------+----------+
+| scipy | 0.18.1 | |
++-----------------+-----------------+----------+
.. _whatsnew_0240.api_breaking.interval_values:
diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py
index d4e3def84664b..5e67cf2ee2837 100644
--- a/pandas/compat/numpy/__init__.py
+++ b/pandas/compat/numpy/__init__.py
@@ -9,19 +9,16 @@
# numpy versioning
_np_version = np.__version__
_nlv = LooseVersion(_np_version)
-_np_version_under1p10 = _nlv < LooseVersion('1.10')
-_np_version_under1p11 = _nlv < LooseVersion('1.11')
-_np_version_under1p12 = _nlv < LooseVersion('1.12')
_np_version_under1p13 = _nlv < LooseVersion('1.13')
_np_version_under1p14 = _nlv < LooseVersion('1.14')
_np_version_under1p15 = _nlv < LooseVersion('1.15')
-if _nlv < '1.9':
+if _nlv < '1.12':
raise ImportError('this version of pandas is incompatible with '
- 'numpy < 1.9.0\n'
+ 'numpy < 1.12.0\n'
'your numpy version is {0}.\n'
- 'Please upgrade numpy to >= 1.9.0 to use '
+ 'Please upgrade numpy to >= 1.12.0 to use '
'this pandas version'.format(_np_version))
@@ -43,9 +40,7 @@ def np_datetime64_compat(s, *args, **kwargs):
tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation
warning, when need to pass '2015-01-01 09:00:00'
"""
-
- if not _np_version_under1p11:
- s = tz_replacer(s)
+ s = tz_replacer(s)
return np.datetime64(s, *args, **kwargs)
@@ -56,23 +51,17 @@ def np_array_datetime64_compat(arr, *args, **kwargs):
tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation
warning, when need to pass '2015-01-01 09:00:00'
"""
-
- if not _np_version_under1p11:
-
- # is_list_like
- if (hasattr(arr, '__iter__') and
- not isinstance(arr, string_and_binary_types)):
- arr = [tz_replacer(s) for s in arr]
- else:
- arr = tz_replacer(arr)
+ # is_list_like
+ if (hasattr(arr, '__iter__')
+ and not isinstance(arr, string_and_binary_types)):
+ arr = [tz_replacer(s) for s in arr]
+ else:
+ arr = tz_replacer(arr)
return np.array(arr, *args, **kwargs)
__all__ = ['np',
- '_np_version_under1p10',
- '_np_version_under1p11',
- '_np_version_under1p12',
'_np_version_under1p13',
'_np_version_under1p14',
'_np_version_under1p15'
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 4607aba070cfc..cb9ffc4bd0fd5 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -30,7 +30,6 @@
ensure_platform_int, ensure_object,
ensure_float64, ensure_uint64,
ensure_int64)
-from pandas.compat.numpy import _np_version_under1p10
from pandas.core.dtypes.missing import isna, na_value_for_dtype
from pandas.core import common as com
@@ -910,26 +909,12 @@ def checked_add_with_arr(arr, b, arr_mask=None, b_mask=None):
------
OverflowError if any x + y exceeds the maximum or minimum int64 value.
"""
- def _broadcast(arr_or_scalar, shape):
- """
- Helper function to broadcast arrays / scalars to the desired shape.
- """
- if _np_version_under1p10:
- if is_scalar(arr_or_scalar):
- out = np.empty(shape)
- out.fill(arr_or_scalar)
- else:
- out = arr_or_scalar
- else:
- out = np.broadcast_to(arr_or_scalar, shape)
- return out
-
# For performance reasons, we broadcast 'b' to the new array 'b2'
# so that it has the same size as 'arr'.
- b2 = _broadcast(b, arr.shape)
+ b2 = np.broadcast_to(b, arr.shape)
if b_mask is not None:
# We do the same broadcasting for b_mask as well.
- b2_mask = _broadcast(b_mask, arr.shape)
+ b2_mask = np.broadcast_to(b_mask, arr.shape)
else:
b2_mask = None
diff --git a/pandas/core/computation/check.py b/pandas/core/computation/check.py
index 2a9ed0fb9764d..06f72bb36de5c 100644
--- a/pandas/core/computation/check.py
+++ b/pandas/core/computation/check.py
@@ -2,7 +2,7 @@
from distutils.version import LooseVersion
_NUMEXPR_INSTALLED = False
-_MIN_NUMEXPR_VERSION = "2.4.6"
+_MIN_NUMEXPR_VERSION = "2.6.1"
try:
import numexpr as ne
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 640b2812d3e85..aff6f17fba2e2 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -1805,12 +1805,7 @@ def to_series(right):
elif right.shape[0] == left.shape[0] and right.shape[1] == 1:
# Broadcast across columns
- try:
- right = np.broadcast_to(right, left.shape)
- except AttributeError:
- # numpy < 1.10.0
- right = np.tile(right, (1, left.shape[1]))
-
+ right = np.broadcast_to(right, left.shape)
right = left._constructor(right,
index=left.index,
columns=left.columns)
diff --git a/pandas/plotting/_compat.py b/pandas/plotting/_compat.py
index 5032b259e9831..385e88d58cc26 100644
--- a/pandas/plotting/_compat.py
+++ b/pandas/plotting/_compat.py
@@ -18,15 +18,8 @@ def inner():
return inner
-_mpl_ge_1_2_1 = _mpl_version('1.2.1', operator.ge)
-_mpl_le_1_2_1 = _mpl_version('1.2.1', operator.le)
-_mpl_ge_1_3_1 = _mpl_version('1.3.1', operator.ge)
-_mpl_ge_1_4_0 = _mpl_version('1.4.0', operator.ge)
-_mpl_ge_1_4_1 = _mpl_version('1.4.1', operator.ge)
-_mpl_ge_1_5_0 = _mpl_version('1.5.0', operator.ge)
-_mpl_ge_2_0_0 = _mpl_version('2.0.0', operator.ge)
-_mpl_le_2_0_0 = _mpl_version('2.0.0', operator.le)
_mpl_ge_2_0_1 = _mpl_version('2.0.1', operator.ge)
_mpl_ge_2_1_0 = _mpl_version('2.1.0', operator.ge)
_mpl_ge_2_2_0 = _mpl_version('2.2.0', operator.ge)
+_mpl_ge_2_2_2 = _mpl_version('2.2.2', operator.ge)
_mpl_ge_3_0_0 = _mpl_version('3.0.0', operator.ge)
diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py
index 96ea8a542a451..fe773a6054db5 100644
--- a/pandas/plotting/_converter.py
+++ b/pandas/plotting/_converter.py
@@ -35,8 +35,6 @@
from pandas.tseries.frequencies import FreqGroup
from pandas.core.indexes.period import Period, PeriodIndex
-from pandas.plotting._compat import _mpl_le_2_0_0
-
# constants
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
@@ -371,13 +369,6 @@ def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
if self._tz is dates.UTC:
self._tz._utcoffset = self._tz.utcoffset(None)
- # For mpl > 2.0 the format strings are controlled via rcparams
- # so do not mess with them. For mpl < 2.0 change the second
- # break point and add a musec break point
- if _mpl_le_2_0_0():
- self.scaled[1. / SEC_PER_DAY] = '%H:%M:%S'
- self.scaled[1. / MUSEC_PER_DAY] = '%H:%M:%S.%f'
-
class PandasAutoDateLocator(dates.AutoDateLocator):
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 77c97412bd3d7..405c534e8528b 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -30,10 +30,7 @@
from pandas.io.formats.printing import pprint_thing
-from pandas.plotting._compat import (_mpl_ge_1_3_1,
- _mpl_ge_1_5_0,
- _mpl_ge_2_0_0,
- _mpl_ge_3_0_0)
+from pandas.plotting._compat import _mpl_ge_3_0_0
from pandas.plotting._style import (plot_params,
_get_standard_colors)
from pandas.plotting._tools import (_subplots, _flatten, table,
@@ -551,14 +548,6 @@ def plt(self):
import matplotlib.pyplot as plt
return plt
- @staticmethod
- def mpl_ge_1_3_1():
- return _mpl_ge_1_3_1()
-
- @staticmethod
- def mpl_ge_1_5_0():
- return _mpl_ge_1_5_0()
-
_need_to_set_index = False
def _get_xticks(self, convert_period=False):
@@ -908,8 +897,7 @@ def _make_plot(self):
scatter = ax.scatter(data[x].values, data[y].values, c=c_values,
label=label, cmap=cmap, **self.kwds)
if cb:
- if self.mpl_ge_1_3_1():
- cbar_label = c if c_is_column else ''
+ cbar_label = c if c_is_column else ''
self._plot_colorbar(ax, label=cbar_label)
if label is not None:
@@ -1012,10 +1000,9 @@ def _make_plot(self):
**kwds)
self._add_legend_handle(newlines[0], label, index=i)
- if not _mpl_ge_2_0_0():
- lines = _get_all_lines(ax)
- left, right = _get_xlim(lines)
- ax.set_xlim(left, right)
+ lines = _get_all_lines(ax)
+ left, right = _get_xlim(lines)
+ ax.set_xlim(left, right)
@classmethod
def _plot(cls, ax, x, y, style=None, column_num=None,
@@ -1141,8 +1128,7 @@ def _plot(cls, ax, x, y, style=None, column_num=None,
# need to remove label, because subplots uses mpl legend as it is
line_kwds = kwds.copy()
- if cls.mpl_ge_1_5_0():
- line_kwds.pop('label')
+ line_kwds.pop('label')
lines = MPLPlot._plot(ax, x, y_values, style=style, **line_kwds)
# get data from the line to get coordinates for fill_between
@@ -1165,19 +1151,9 @@ def _plot(cls, ax, x, y, style=None, column_num=None,
cls._update_stacker(ax, stacking_id, y)
# LinePlot expects list of artists
- res = [rect] if cls.mpl_ge_1_5_0() else lines
+ res = [rect]
return res
- def _add_legend_handle(self, handle, label, index=None):
- if not self.mpl_ge_1_5_0():
- from matplotlib.patches import Rectangle
- # Because fill_between isn't supported in legend,
- # specifically add Rectangle handle here
- alpha = self.kwds.get('alpha', None)
- handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(),
- alpha=alpha)
- LinePlot._add_legend_handle(self, handle, label, index=index)
-
def _post_plot_logic(self, ax, data):
LinePlot._post_plot_logic(self, ax, data)
diff --git a/pandas/plotting/_style.py b/pandas/plotting/_style.py
index c72e092c73aa2..9bc12d22e1685 100644
--- a/pandas/plotting/_style.py
+++ b/pandas/plotting/_style.py
@@ -4,14 +4,12 @@
import warnings
from contextlib import contextmanager
-import re
import numpy as np
from pandas.core.dtypes.common import is_list_like
from pandas.compat import lrange, lmap
import pandas.compat as compat
-from pandas.plotting._compat import _mpl_ge_2_0_0
def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
@@ -72,18 +70,9 @@ def _maybe_valid_colors(colors):
# check whether each character can be convertible to colors
maybe_color_cycle = _maybe_valid_colors(list(colors))
if maybe_single_color and maybe_color_cycle and len(colors) > 1:
- # Special case for single str 'CN' match and convert to hex
- # for supporting matplotlib < 2.0.0
- if re.match(r'\AC[0-9]\Z', colors) and _mpl_ge_2_0_0():
- hex_color = [c['color']
- for c in list(plt.rcParams['axes.prop_cycle'])]
- colors = [hex_color[int(colors[1])]]
- else:
- # this may no longer be required
- msg = ("'{0}' can be parsed as both single color and "
- "color cycle. Specify each color using a list "
- "like ['{0}'] or {1}")
- raise ValueError(msg.format(colors, list(colors)))
+ hex_color = [c['color']
+ for c in list(plt.rcParams['axes.prop_cycle'])]
+ colors = [hex_color[int(colors[1])]]
elif maybe_single_color:
colors = [colors]
else:
diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py
index d81ab2b3a2ec3..fe98b74499983 100644
--- a/pandas/tests/arithmetic/test_period.py
+++ b/pandas/tests/arithmetic/test_period.py
@@ -15,9 +15,7 @@
import pandas.core.indexes.period as period
from pandas.core import ops
-from pandas import (
- Period, PeriodIndex, period_range, Series,
- _np_version_under1p10)
+from pandas import Period, PeriodIndex, period_range, Series
# ------------------------------------------------------------------
@@ -897,20 +895,14 @@ def test_pi_ops_errors(self, ng):
with pytest.raises(TypeError):
np.add(obj, ng)
- if _np_version_under1p10:
- assert np.add(ng, obj) is NotImplemented
- else:
- with pytest.raises(TypeError):
- np.add(ng, obj)
+ with pytest.raises(TypeError):
+ np.add(ng, obj)
with pytest.raises(TypeError):
np.subtract(obj, ng)
- if _np_version_under1p10:
- assert np.subtract(ng, obj) is NotImplemented
- else:
- with pytest.raises(TypeError):
- np.subtract(ng, obj)
+ with pytest.raises(TypeError):
+ np.subtract(ng, obj)
def test_pi_ops_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'],
@@ -1014,10 +1006,7 @@ def test_pi_sub_period(self):
tm.assert_index_equal(result, exp)
result = np.subtract(pd.Period('2012-01', freq='M'), idx)
- if _np_version_under1p10:
- assert result is NotImplemented
- else:
- tm.assert_index_equal(result, exp)
+ tm.assert_index_equal(result, exp)
exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name='idx')
tm.assert_index_equal(idx - pd.Period('NaT', freq='M'), exp)
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 8864e5fffeb12..b83fba7e7b277 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -15,7 +15,6 @@
from pandas.compat import lrange, PY35
from pandas import (compat, isna, notna, DataFrame, Series,
MultiIndex, date_range, Timestamp, Categorical,
- _np_version_under1p12,
to_datetime, to_timedelta)
import pandas as pd
import pandas.core.nanops as nanops
@@ -2021,9 +2020,6 @@ def test_dot(self):
@pytest.mark.skipif(not PY35,
reason='matmul supported for Python>=3.5')
- @pytest.mark.xfail(
- _np_version_under1p12,
- reason="unpredictable return types under numpy < 1.12")
def test_matmul(self):
# matmul test is for GH 10259
a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
diff --git a/pandas/tests/frame/test_quantile.py b/pandas/tests/frame/test_quantile.py
index 2f264874378bc..3dbac79fed02b 100644
--- a/pandas/tests/frame/test_quantile.py
+++ b/pandas/tests/frame/test_quantile.py
@@ -6,7 +6,7 @@
import pytest
import numpy as np
-from pandas import (DataFrame, Series, Timestamp, _np_version_under1p11)
+from pandas import DataFrame, Series, Timestamp
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
@@ -154,12 +154,8 @@ def test_quantile_interpolation(self):
result = df.quantile([.25, .5], interpolation='midpoint')
# https://github.com/numpy/numpy/issues/7163
- if _np_version_under1p11:
- expected = DataFrame([[1.5, 1.5, 1.5], [2.5, 2.5, 2.5]],
- index=[.25, .5], columns=['a', 'b', 'c'])
- else:
- expected = DataFrame([[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]],
- index=[.25, .5], columns=['a', 'b', 'c'])
+ expected = DataFrame([[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]],
+ index=[.25, .5], columns=['a', 'b', 'c'])
assert_frame_equal(result, expected)
def test_quantile_multi(self):
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index b60b222d095b9..9ce77326d37b7 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -7,8 +7,7 @@
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp,
- date_range, _np_version_under1p10, Index,
- bdate_range)
+ date_range, bdate_range, Index)
from pandas.tseries.offsets import BMonthEnd, CDay, BDay, Day, Hour
from pandas.tests.test_base import Ops
from pandas.core.dtypes.generic import ABCDateOffset
@@ -89,12 +88,11 @@ def test_numpy_minmax(self):
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
- if not _np_version_under1p10:
- errmsg = "the 'out' parameter is not supported"
- tm.assert_raises_regex(
- ValueError, errmsg, np.argmin, dr, out=0)
- tm.assert_raises_regex(
- ValueError, errmsg, np.argmax, dr, out=0)
+ errmsg = "the 'out' parameter is not supported"
+ tm.assert_raises_regex(
+ ValueError, errmsg, np.argmin, dr, out=0)
+ tm.assert_raises_regex(
+ ValueError, errmsg, np.argmax, dr, out=0)
def test_repeat_range(self, tz_naive_fixture):
tz = tz_naive_fixture
diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py
index 85aa3f6a38fb3..a59efe57f83c4 100644
--- a/pandas/tests/indexes/period/test_ops.py
+++ b/pandas/tests/indexes/period/test_ops.py
@@ -5,8 +5,7 @@
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
-from pandas import (DatetimeIndex, PeriodIndex, Series, Period,
- _np_version_under1p10, Index)
+from pandas import DatetimeIndex, PeriodIndex, Series, Period, Index
from pandas.tests.test_base import Ops
@@ -73,12 +72,11 @@ def test_numpy_minmax(self):
assert np.argmin(pr) == 0
assert np.argmax(pr) == 5
- if not _np_version_under1p10:
- errmsg = "the 'out' parameter is not supported"
- tm.assert_raises_regex(
- ValueError, errmsg, np.argmin, pr, out=0)
- tm.assert_raises_regex(
- ValueError, errmsg, np.argmax, pr, out=0)
+ errmsg = "the 'out' parameter is not supported"
+ tm.assert_raises_regex(
+ ValueError, errmsg, np.argmin, pr, out=0)
+ tm.assert_raises_regex(
+ ValueError, errmsg, np.argmax, pr, out=0)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
diff --git a/pandas/tests/indexes/period/test_partial_slicing.py b/pandas/tests/indexes/period/test_partial_slicing.py
index 6d142722c315a..82527464ea6e7 100644
--- a/pandas/tests/indexes/period/test_partial_slicing.py
+++ b/pandas/tests/indexes/period/test_partial_slicing.py
@@ -5,7 +5,7 @@
import pandas as pd
from pandas.util import testing as tm
from pandas import (Series, period_range, DatetimeIndex, PeriodIndex,
- DataFrame, _np_version_under1p12, Period)
+ DataFrame, Period)
class TestPeriodIndex(object):
@@ -68,16 +68,12 @@ def test_range_slice_day(self):
didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400)
pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400)
- # changed to TypeError in 1.12
- # https://github.com/numpy/numpy/pull/6271
- exc = IndexError if _np_version_under1p12 else TypeError
-
for idx in [didx, pidx]:
# slices against index should raise IndexError
values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
'2013/02/01 09:00']
for v in values:
- with pytest.raises(exc):
+ with pytest.raises(TypeError):
idx[v:]
s = Series(np.random.rand(len(idx)), index=idx)
@@ -89,7 +85,7 @@ def test_range_slice_day(self):
invalid = ['2013/02/01 9H', '2013/02/01 09:00']
for v in invalid:
- with pytest.raises(exc):
+ with pytest.raises(TypeError):
idx[v:]
def test_range_slice_seconds(self):
@@ -98,16 +94,12 @@ def test_range_slice_seconds(self):
periods=4000)
pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
- # changed to TypeError in 1.12
- # https://github.com/numpy/numpy/pull/6271
- exc = IndexError if _np_version_under1p12 else TypeError
-
for idx in [didx, pidx]:
# slices against index should raise IndexError
values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
'2013/02/01 09:00']
for v in values:
- with pytest.raises(exc):
+ with pytest.raises(TypeError):
idx[v:]
s = Series(np.random.rand(len(idx)), index=idx)
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index d7bdd18f48523..9f8a3e893c3de 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -5,10 +5,8 @@
import pandas as pd
import pandas.util.testing as tm
-from pandas import to_timedelta
from pandas import (Series, Timedelta, Timestamp, TimedeltaIndex,
- timedelta_range,
- _np_version_under1p10)
+ timedelta_range, to_timedelta)
from pandas._libs.tslib import iNaT
from pandas.tests.test_base import Ops
from pandas.tseries.offsets import Day, Hour
@@ -68,12 +66,11 @@ def test_numpy_minmax(self):
assert np.argmin(td) == 0
assert np.argmax(td) == 5
- if not _np_version_under1p10:
- errmsg = "the 'out' parameter is not supported"
- tm.assert_raises_regex(
- ValueError, errmsg, np.argmin, td, out=0)
- tm.assert_raises_regex(
- ValueError, errmsg, np.argmax, td, out=0)
+ errmsg = "the 'out' parameter is not supported"
+ tm.assert_raises_regex(
+ ValueError, errmsg, np.argmin, td, out=0)
+ tm.assert_raises_regex(
+ ValueError, errmsg, np.argmax, td, out=0)
def test_value_counts_unique(self):
# GH 7735
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index b142ce339879c..f41a3a10604af 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -39,7 +39,7 @@ def _ok_for_gaussian_kde(kind):
except ImportError:
return False
- return plotting._compat._mpl_ge_1_5_0()
+ return True
@td.skip_if_no_mpl
@@ -50,31 +50,16 @@ def setup_method(self, method):
import matplotlib as mpl
mpl.rcdefaults()
- self.mpl_le_1_2_1 = plotting._compat._mpl_le_1_2_1()
- self.mpl_ge_1_3_1 = plotting._compat._mpl_ge_1_3_1()
- self.mpl_ge_1_4_0 = plotting._compat._mpl_ge_1_4_0()
- self.mpl_ge_1_5_0 = plotting._compat._mpl_ge_1_5_0()
- self.mpl_ge_2_0_0 = plotting._compat._mpl_ge_2_0_0()
self.mpl_ge_2_0_1 = plotting._compat._mpl_ge_2_0_1()
+ self.mpl_ge_2_1_0 = plotting._compat._mpl_ge_2_1_0()
self.mpl_ge_2_2_0 = plotting._compat._mpl_ge_2_2_0()
+ self.mpl_ge_2_2_2 = plotting._compat._mpl_ge_2_2_2()
self.mpl_ge_3_0_0 = plotting._compat._mpl_ge_3_0_0()
- if self.mpl_ge_1_4_0:
- self.bp_n_objects = 7
- else:
- self.bp_n_objects = 8
- if self.mpl_ge_1_5_0:
- # 1.5 added PolyCollections to legend handler
- # so we have twice as many items.
- self.polycollection_factor = 2
- else:
- self.polycollection_factor = 1
-
- if self.mpl_ge_2_0_0:
- self.default_figsize = (6.4, 4.8)
- else:
- self.default_figsize = (8.0, 6.0)
- self.default_tick_position = 'left' if self.mpl_ge_2_0_0 else 'default'
+ self.bp_n_objects = 7
+ self.polycollection_factor = 2
+ self.default_figsize = (6.4, 4.8)
+ self.default_tick_position = 'left'
n = 100
with tm.RNGContext(42):
@@ -462,7 +447,7 @@ def _check_box_return_type(self, returned, return_type, expected_keys=None,
assert isinstance(value.lines, dict)
elif return_type == 'dict':
line = value['medians'][0]
- axes = line.axes if self.mpl_ge_1_5_0 else line.get_axes()
+ axes = line.axes
if check_ax_title:
assert axes.get_title() == key
else:
@@ -510,19 +495,11 @@ def is_grid_on():
obj.plot(kind=kind, grid=True, **kws)
assert is_grid_on()
- def _maybe_unpack_cycler(self, rcParams, field='color'):
+ def _unpack_cycler(self, rcParams, field='color'):
"""
- Compat layer for MPL 1.5 change to color cycle
-
- Before: plt.rcParams['axes.color_cycle'] -> ['b', 'g', 'r'...]
- After : plt.rcParams['axes.prop_cycle'] -> cycler(...)
+ Auxiliary function for correctly unpacking cycler after MPL >= 1.5
"""
- if self.mpl_ge_1_5_0:
- cyl = rcParams['axes.prop_cycle']
- colors = [v[field] for v in cyl]
- else:
- colors = rcParams['axes.color_cycle']
- return colors
+ return [v[field] for v in rcParams['axes.prop_cycle']]
def _check_plot_works(f, filterwarnings='always', **kwargs):
diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py
index 7661b46a79061..e89584ca35d94 100644
--- a/pandas/tests/plotting/test_boxplot_method.py
+++ b/pandas/tests/plotting/test_boxplot_method.py
@@ -3,7 +3,6 @@
import pytest
import itertools
import string
-from distutils.version import LooseVersion
from pandas import Series, DataFrame, MultiIndex
from pandas.compat import range, lzip
@@ -21,15 +20,6 @@
""" Test cases for .boxplot method """
-def _skip_if_mpl_14_or_dev_boxplot():
- # GH 8382
- # Boxplot failures on 1.4 and 1.4.1
- # Don't need try / except since that's done at class level
- import matplotlib
- if LooseVersion(matplotlib.__version__) >= LooseVersion('1.4'):
- pytest.skip("Matplotlib Regression in 1.4 and current dev.")
-
-
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
@@ -71,12 +61,12 @@ def test_boxplot_legacy2(self):
# passed ax should be used:
fig, ax = self.plt.subplots()
axes = df.boxplot('Col1', by='X', ax=ax)
- ax_axes = ax.axes if self.mpl_ge_1_5_0 else ax.get_axes()
+ ax_axes = ax.axes
assert ax_axes is axes
fig, ax = self.plt.subplots()
axes = df.groupby('Y').boxplot(ax=ax, return_type='axes')
- ax_axes = ax.axes if self.mpl_ge_1_5_0 else ax.get_axes()
+ ax_axes = ax.axes
assert ax_axes is axes['A']
# Multiple columns with an ax argument should use same figure
@@ -155,7 +145,6 @@ def _check_ax_limits(col, ax):
@pytest.mark.slow
def test_boxplot_empty_column(self):
- _skip_if_mpl_14_or_dev_boxplot()
df = DataFrame(np.random.randn(20, 4))
df.loc[:, 0] = np.nan
_check_plot_works(df.boxplot, return_type='axes')
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index c66e03fe7b2a2..4865638671ea9 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -7,7 +7,7 @@
from pandas.compat import lrange, zip
import numpy as np
-from pandas import Index, Series, DataFrame, NaT
+from pandas import Index, Series, DataFrame, NaT, isna
from pandas.compat import PY3
from pandas.core.indexes.datetimes import date_range, bdate_range
from pandas.core.indexes.timedeltas import timedelta_range
@@ -135,7 +135,7 @@ def f(*args, **kwds):
_, ax = self.plt.subplots()
ts.plot(style='k', ax=ax)
- color = (0., 0., 0., 1) if self.mpl_ge_2_0_0 else (0., 0., 0.)
+ color = (0., 0., 0., 1)
assert color == ax.get_lines()[0].get_color()
def test_both_style_and_color(self):
@@ -403,80 +403,92 @@ def test_get_finder(self):
def test_finder_daily(self):
day_lst = [10, 40, 252, 400, 950, 2750, 10000]
- if self.mpl_ge_2_0_0:
+ if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1
+ or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)):
+ # 2.0.0, 2.2.0 (exactly) or >= 3.0.0
+ xpl1 = xpl2 = [Period('1999-1-1', freq='B').ordinal] * len(day_lst)
+ else: # 2.0.1, 2.1.0, 2.2.2, 2.2.3
xpl1 = [7565, 7564, 7553, 7546, 7518, 7428, 7066]
xpl2 = [7566, 7564, 7554, 7546, 7519, 7429, 7066]
- else:
- xpl1 = xpl2 = [Period('1999-1-1', freq='B').ordinal] * len(day_lst)
+ rs1 = []
+ rs2 = []
for i, n in enumerate(day_lst):
- xp = xpl1[i]
rng = bdate_range('1999-1-1', periods=n)
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
- rs = xaxis.get_majorticklocs()[0]
- assert xp == rs
- xp = xpl2[i]
+ rs1.append(xaxis.get_majorticklocs()[0])
+
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
- rs = xaxis.get_majorticklocs()[0]
- assert xp == rs
+ rs2.append(xaxis.get_majorticklocs()[0])
self.plt.close(ax.get_figure())
+ assert rs1 == xpl1
+ assert rs2 == xpl2
+
@pytest.mark.slow
def test_finder_quarterly(self):
yrs = [3.5, 11]
- if self.mpl_ge_2_0_0:
+ if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1
+ or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)):
+ # 2.0.0, 2.2.0 (exactly) or >= 3.0.0
+ xpl1 = xpl2 = [Period('1988Q1').ordinal] * len(yrs)
+ else: # 2.0.1, 2.1.0, 2.2.2, 2.2.3
xpl1 = [68, 68]
xpl2 = [72, 68]
- else:
- xpl1 = xpl2 = [Period('1988Q1').ordinal] * len(yrs)
+ rs1 = []
+ rs2 = []
for i, n in enumerate(yrs):
- xp = xpl1[i]
rng = period_range('1987Q2', periods=int(n * 4), freq='Q')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
- rs = xaxis.get_majorticklocs()[0]
- assert rs == xp
- xp = xpl2[i]
+ rs1.append(xaxis.get_majorticklocs()[0])
+
(vmin, vmax) = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
- rs = xaxis.get_majorticklocs()[0]
- assert xp == rs
+ rs2.append(xaxis.get_majorticklocs()[0])
self.plt.close(ax.get_figure())
+ assert rs1 == xpl1
+ assert rs2 == xpl2
+
@pytest.mark.slow
def test_finder_monthly(self):
yrs = [1.15, 2.5, 4, 11]
- if self.mpl_ge_2_0_0:
+ if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1
+ or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)):
+ # 2.0.0, 2.2.0 (exactly) or >= 3.0.0
+ xpl1 = xpl2 = [Period('Jan 1988').ordinal] * len(yrs)
+ else: # 2.0.1, 2.1.0, 2.2.2, 2.2.3
xpl1 = [216, 216, 204, 204]
xpl2 = [216, 216, 216, 204]
- else:
- xpl1 = xpl2 = [Period('Jan 1988').ordinal] * len(yrs)
+ rs1 = []
+ rs2 = []
for i, n in enumerate(yrs):
- xp = xpl1[i]
rng = period_range('1987Q2', periods=int(n * 12), freq='M')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
- rs = xaxis.get_majorticklocs()[0]
- assert rs == xp
- xp = xpl2[i]
+ rs1.append(xaxis.get_majorticklocs()[0])
+
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
- rs = xaxis.get_majorticklocs()[0]
- assert xp == rs
+ rs2.append(xaxis.get_majorticklocs()[0])
self.plt.close(ax.get_figure())
+ assert rs1 == xpl1
+ assert rs2 == xpl2
+
def test_finder_monthly_long(self):
rng = period_range('1988Q1', periods=24 * 12, freq='M')
ser = Series(np.random.randn(len(rng)), rng)
@@ -489,21 +501,26 @@ def test_finder_monthly_long(self):
@pytest.mark.slow
def test_finder_annual(self):
- if self.mpl_ge_2_0_0:
- xp = [1986, 1986, 1990, 1990, 1995, 2020, 1970, 1970]
- else:
+ if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1
+ or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)):
+ # 2.0.0, 2.2.0 (exactly) or >= 3.0.0
xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170]
+ else: # 2.0.1, 2.1.0, 2.2.2, 2.2.3
+ xp = [1986, 1986, 1990, 1990, 1995, 2020, 1970, 1970]
+ xp = [Period(x, freq='A').ordinal for x in xp]
+ rs = []
for i, nyears in enumerate([5, 10, 19, 49, 99, 199, 599, 1001]):
rng = period_range('1987', periods=nyears, freq='A')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
- rs = xaxis.get_majorticklocs()[0]
- assert rs == Period(xp[i], freq='A').ordinal
+ rs.append(xaxis.get_majorticklocs()[0])
self.plt.close(ax.get_figure())
+ assert rs == xp
+
@pytest.mark.slow
def test_finder_minutely(self):
nminutes = 50 * 24 * 60
@@ -513,10 +530,8 @@ def test_finder_minutely(self):
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
- if self.mpl_ge_2_0_0:
- xp = Period('1998-12-29 12:00', freq='Min').ordinal
- else:
- xp = Period('1/1/1999', freq='Min').ordinal
+ xp = Period('1/1/1999', freq='Min').ordinal
+
assert rs == xp
def test_finder_hourly(self):
@@ -527,13 +542,13 @@ def test_finder_hourly(self):
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
- if self.mpl_ge_2_0_0:
- xp = Period('1998-12-31 22:00', freq='H').ordinal
- else:
+ if self.mpl_ge_2_0_1:
xp = Period('1/1/1999', freq='H').ordinal
+ else: # 2.0.0
+ xp = Period('1998-12-31 22:00', freq='H').ordinal
+
assert rs == xp
- @td.skip_if_mpl_1_5
@pytest.mark.slow
def test_gaps(self):
ts = tm.makeTimeSeries()
@@ -544,6 +559,12 @@ def test_gaps(self):
assert len(lines) == 1
line = lines[0]
data = line.get_xydata()
+
+ if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1
+ or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)):
+ # 2.0.0, 2.2.0 (exactly) or >= 3.0.0
+ data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)
+
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[5:25, 1].all()
@@ -559,6 +580,12 @@ def test_gaps(self):
assert len(lines) == 1
line = lines[0]
data = line.get_xydata()
+
+ if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1
+ or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)):
+ # 2.0.0, 2.2.0 (exactly) or >= 3.0.0
+ data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)
+
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[2:5, 1].all()
@@ -574,11 +601,15 @@ def test_gaps(self):
assert len(lines) == 1
line = lines[0]
data = line.get_xydata()
+ if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1
+ or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)):
+ # 2.0.0, 2.2.0 (exactly) or >= 3.0.0
+ data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)
+
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[2:5, 1].all()
- @td.skip_if_mpl_1_5
@pytest.mark.slow
def test_gap_upsample(self):
low = tm.makeTimeSeries()
@@ -592,8 +623,13 @@ def test_gap_upsample(self):
lines = ax.get_lines()
assert len(lines) == 1
assert len(ax.right_ax.get_lines()) == 1
+
line = lines[0]
data = line.get_xydata()
+ if (self.mpl_ge_3_0_0 or not self.mpl_ge_2_0_1
+ or (self.mpl_ge_2_1_0 and not self.mpl_ge_2_2_2)):
+ # 2.0.0, 2.2.0 (exactly) or >= 3.0.0
+ data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan)
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
@@ -659,8 +695,6 @@ def test_secondary_y_ts(self):
@pytest.mark.slow
@td.skip_if_no_scipy
def test_secondary_kde(self):
- if not self.mpl_ge_1_5_0:
- pytest.skip("mpl is not supported")
_skip_if_no_scipy_gaussian_kde()
ser = Series(np.random.randn(10))
@@ -1359,18 +1393,13 @@ def test_plot_outofbounds_datetime(self):
def test_format_timedelta_ticks_narrow(self):
- if self.mpl_ge_2_2_0:
- expected_labels = (['-1 days 23:59:59.999999998'] +
- ['00:00:00.0000000{:0>2d}'.format(2 * i)
- for i in range(6)])
- elif self.mpl_ge_2_0_0:
+ if self.mpl_ge_2_0_1:
+ expected_labels = (['00:00:00.0000000{:0>2d}'.format(i)
+ for i in range(10)])
+ else: # 2.0.0
expected_labels = [''] + [
'00:00:00.00000000{:d}'.format(2 * i)
for i in range(5)] + ['']
- else:
- expected_labels = [
- '00:00:00.00000000{:d}'.format(i)
- for i in range(10)]
rng = timedelta_range('0', periods=10, freq='ns')
df = DataFrame(np.random.randn(len(rng), 3), rng)
@@ -1378,41 +1407,30 @@ def test_format_timedelta_ticks_narrow(self):
df.plot(fontsize=2, ax=ax)
fig.canvas.draw()
labels = ax.get_xticklabels()
- assert len(labels) == len(expected_labels)
- for l, l_expected in zip(labels, expected_labels):
- assert l.get_text() == l_expected
- def test_format_timedelta_ticks_wide(self):
+ result_labels = [x.get_text() for x in labels]
+ assert len(result_labels) == len(expected_labels)
+ assert result_labels == expected_labels
- if self.mpl_ge_2_0_0:
- expected_labels = [
- '',
- '00:00:00',
- '1 days 03:46:40',
- '2 days 07:33:20',
- '3 days 11:20:00',
- '4 days 15:06:40',
- '5 days 18:53:20',
- '6 days 22:40:00',
- '8 days 02:26:40',
- '9 days 06:13:20',
- ''
- ]
- if self.mpl_ge_2_2_0:
- expected_labels[0] = '-2 days 20:13:20'
- expected_labels[-1] = '10 days 10:00:00'
- else:
- expected_labels = [
- '00:00:00',
- '1 days 03:46:40',
- '2 days 07:33:20',
- '3 days 11:20:00',
- '4 days 15:06:40',
- '5 days 18:53:20',
- '6 days 22:40:00',
- '8 days 02:26:40',
- ''
- ]
+ def test_format_timedelta_ticks_wide(self):
+ expected_labels = [
+ '',
+ '00:00:00',
+ '1 days 03:46:40',
+ '2 days 07:33:20',
+ '3 days 11:20:00',
+ '4 days 15:06:40',
+ '5 days 18:53:20',
+ '6 days 22:40:00',
+ '8 days 02:26:40',
+ '9 days 06:13:20',
+ ''
+ ]
+ if self.mpl_ge_2_2_0:
+ expected_labels = expected_labels[1:-1]
+ elif self.mpl_ge_2_0_1:
+ expected_labels = expected_labels[1:-1]
+ expected_labels[-1] = ''
rng = timedelta_range('0', periods=10, freq='1 d')
df = DataFrame(np.random.randn(len(rng), 3), rng)
@@ -1420,9 +1438,10 @@ def test_format_timedelta_ticks_wide(self):
ax = df.plot(fontsize=2, ax=ax)
fig.canvas.draw()
labels = ax.get_xticklabels()
- assert len(labels) == len(expected_labels)
- for l, l_expected in zip(labels, expected_labels):
- assert l.get_text() == l_expected
+
+ result_labels = [x.get_text() for x in labels]
+ assert len(result_labels) == len(expected_labels)
+ assert result_labels == expected_labels
def test_timedelta_plot(self):
# test issue #8711
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index a4f5d8e2f4ff2..25dfbaba762c9 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -141,22 +141,15 @@ def test_plot(self):
fig, ax = self.plt.subplots()
axes = df.plot.bar(subplots=True, ax=ax)
assert len(axes) == 1
- if self.mpl_ge_1_5_0:
- result = ax.axes
- else:
- result = ax.get_axes() # deprecated
+ result = ax.axes
assert result is axes[0]
# GH 15516
def test_mpl2_color_cycle_str(self):
- # test CN mpl 2.0 color cycle
- if self.mpl_ge_2_0_0:
- colors = ['C' + str(x) for x in range(10)]
- df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
- for c in colors:
- _check_plot_works(df.plot, color=c)
- else:
- pytest.skip("not supported in matplotlib < 2.0.0")
+ colors = ['C' + str(x) for x in range(10)]
+ df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
+ for c in colors:
+ _check_plot_works(df.plot, color=c)
def test_color_single_series_list(self):
# GH 3486
@@ -854,7 +847,7 @@ def test_area_lim(self):
@pytest.mark.slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
- default_colors = self._maybe_unpack_cycler(plt.rcParams)
+ default_colors = self._unpack_cycler(plt.rcParams)
df = DataFrame(randn(5, 5))
ax = df.plot.bar()
@@ -1180,11 +1173,9 @@ def test_plot_scatter_with_c(self):
# default to Greys
assert ax.collections[0].cmap.name == 'Greys'
- if self.mpl_ge_1_3_1:
-
- # n.b. there appears to be no public method to get the colorbar
- # label
- assert ax.collections[0].colorbar._label == 'z'
+ # n.b. there appears to be no public method
+ # to get the colorbar label
+ assert ax.collections[0].colorbar._label == 'z'
cm = 'cubehelix'
ax = df.plot.scatter(x='x', y='y', c='z', colormap=cm)
@@ -1227,7 +1218,7 @@ def test_scatter_colors(self):
with pytest.raises(TypeError):
df.plot.scatter(x='a', y='b', c='c', color='green')
- default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
+ default_colors = self._unpack_cycler(self.plt.rcParams)
ax = df.plot.scatter(x='a', y='b', c='c')
tm.assert_numpy_array_equal(
@@ -1392,10 +1383,7 @@ def test_bar_edge(self):
def test_bar_log_no_subplots(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
- expected = np.array([1., 10.])
-
- if not self.mpl_le_1_2_1:
- expected = np.hstack((.1, expected, 100))
+ expected = np.array([.1, 1., 10., 100])
# no subplots
df = DataFrame({'A': [3] * 5, 'B': lrange(1, 6)}, index=lrange(5))
@@ -1404,9 +1392,7 @@ def test_bar_log_no_subplots(self):
@pytest.mark.slow
def test_bar_log_subplots(self):
- expected = np.array([1., 10., 100., 1000.])
- if not self.mpl_le_1_2_1:
- expected = np.hstack((.1, expected, 1e4))
+ expected = np.array([.1, 1., 10., 100., 1000., 1e4])
ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar(
log=True, subplots=True)
@@ -1521,8 +1507,6 @@ def test_boxplot_subplots_return_type(self):
@td.skip_if_no_scipy
def test_kde_df(self):
_skip_if_no_scipy_gaussian_kde()
- if not self.mpl_ge_1_5_0:
- pytest.skip("mpl is not supported")
df = DataFrame(randn(100, 4))
ax = _check_plot_works(df.plot, kind='kde')
@@ -1545,8 +1529,6 @@ def test_kde_df(self):
@td.skip_if_no_scipy
def test_kde_missing_vals(self):
_skip_if_no_scipy_gaussian_kde()
- if not self.mpl_ge_1_5_0:
- pytest.skip("mpl is not supported")
df = DataFrame(np.random.uniform(size=(100, 4)))
df.loc[0, 0] = np.nan
@@ -1555,8 +1537,6 @@ def test_kde_missing_vals(self):
@pytest.mark.slow
def test_hist_df(self):
from matplotlib.patches import Rectangle
- if self.mpl_le_1_2_1:
- pytest.skip("not supported in matplotlib <= 1.2.x")
df = DataFrame(randn(100, 4))
series = df[0]
@@ -1668,44 +1648,42 @@ def test_hist_df_coord(self):
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([6, 7, 8, 9, 10]))
- if self.mpl_ge_1_3_1:
-
- # horizontal
- ax = df.plot.hist(bins=5, orientation='horizontal')
- self._check_box_coord(ax.patches[:5],
- expected_x=np.array([0, 0, 0, 0, 0]),
- expected_w=np.array([10, 9, 8, 7, 6]))
- self._check_box_coord(ax.patches[5:10],
- expected_x=np.array([0, 0, 0, 0, 0]),
- expected_w=np.array([8, 8, 8, 8, 8]))
- self._check_box_coord(ax.patches[10:],
- expected_x=np.array([0, 0, 0, 0, 0]),
- expected_w=np.array([6, 7, 8, 9, 10]))
-
- ax = df.plot.hist(bins=5, stacked=True,
- orientation='horizontal')
- self._check_box_coord(ax.patches[:5],
- expected_x=np.array([0, 0, 0, 0, 0]),
- expected_w=np.array([10, 9, 8, 7, 6]))
- self._check_box_coord(ax.patches[5:10],
- expected_x=np.array([10, 9, 8, 7, 6]),
- expected_w=np.array([8, 8, 8, 8, 8]))
- self._check_box_coord(
- ax.patches[10:],
- expected_x=np.array([18, 17, 16, 15, 14]),
- expected_w=np.array([6, 7, 8, 9, 10]))
-
- axes = df.plot.hist(bins=5, stacked=True, subplots=True,
- orientation='horizontal')
- self._check_box_coord(axes[0].patches,
- expected_x=np.array([0, 0, 0, 0, 0]),
- expected_w=np.array([10, 9, 8, 7, 6]))
- self._check_box_coord(axes[1].patches,
- expected_x=np.array([0, 0, 0, 0, 0]),
- expected_w=np.array([8, 8, 8, 8, 8]))
- self._check_box_coord(axes[2].patches,
- expected_x=np.array([0, 0, 0, 0, 0]),
- expected_w=np.array([6, 7, 8, 9, 10]))
+ # horizontal
+ ax = df.plot.hist(bins=5, orientation='horizontal')
+ self._check_box_coord(ax.patches[:5],
+ expected_x=np.array([0, 0, 0, 0, 0]),
+ expected_w=np.array([10, 9, 8, 7, 6]))
+ self._check_box_coord(ax.patches[5:10],
+ expected_x=np.array([0, 0, 0, 0, 0]),
+ expected_w=np.array([8, 8, 8, 8, 8]))
+ self._check_box_coord(ax.patches[10:],
+ expected_x=np.array([0, 0, 0, 0, 0]),
+ expected_w=np.array([6, 7, 8, 9, 10]))
+
+ ax = df.plot.hist(bins=5, stacked=True,
+ orientation='horizontal')
+ self._check_box_coord(ax.patches[:5],
+ expected_x=np.array([0, 0, 0, 0, 0]),
+ expected_w=np.array([10, 9, 8, 7, 6]))
+ self._check_box_coord(ax.patches[5:10],
+ expected_x=np.array([10, 9, 8, 7, 6]),
+ expected_w=np.array([8, 8, 8, 8, 8]))
+ self._check_box_coord(
+ ax.patches[10:],
+ expected_x=np.array([18, 17, 16, 15, 14]),
+ expected_w=np.array([6, 7, 8, 9, 10]))
+
+ axes = df.plot.hist(bins=5, stacked=True, subplots=True,
+ orientation='horizontal')
+ self._check_box_coord(axes[0].patches,
+ expected_x=np.array([0, 0, 0, 0, 0]),
+ expected_w=np.array([10, 9, 8, 7, 6]))
+ self._check_box_coord(axes[1].patches,
+ expected_x=np.array([0, 0, 0, 0, 0]),
+ expected_w=np.array([8, 8, 8, 8, 8]))
+ self._check_box_coord(axes[2].patches,
+ expected_x=np.array([0, 0, 0, 0, 0]),
+ expected_w=np.array([6, 7, 8, 9, 10]))
@pytest.mark.slow
def test_plot_int_columns(self):
@@ -1904,14 +1882,13 @@ def test_dont_modify_colors(self):
def test_line_colors_and_styles_subplots(self):
# GH 9894
from matplotlib import cm
- default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
+ default_colors = self._unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
axes = df.plot(subplots=True)
for ax, c in zip(axes, list(default_colors)):
- if self.mpl_ge_2_0_0:
- c = [c]
+ c = [c]
self._check_colors(ax.get_lines(), linecolors=c)
tm.close()
@@ -1992,13 +1969,7 @@ def test_area_colors(self):
self._check_colors(poly, facecolors=custom_colors)
handles, labels = ax.get_legend_handles_labels()
- if self.mpl_ge_1_5_0:
- self._check_colors(handles, facecolors=custom_colors)
- else:
- # legend is stored as Line2D, thus check linecolors
- linehandles = [x for x in handles
- if not isinstance(x, PolyCollection)]
- self._check_colors(linehandles, linecolors=custom_colors)
+ self._check_colors(handles, facecolors=custom_colors)
for h in handles:
assert h.get_alpha() is None
@@ -2011,12 +1982,7 @@ def test_area_colors(self):
self._check_colors(poly, facecolors=jet_colors)
handles, labels = ax.get_legend_handles_labels()
- if self.mpl_ge_1_5_0:
- self._check_colors(handles, facecolors=jet_colors)
- else:
- linehandles = [x for x in handles
- if not isinstance(x, PolyCollection)]
- self._check_colors(linehandles, linecolors=jet_colors)
+ self._check_colors(handles, facecolors=jet_colors)
for h in handles:
assert h.get_alpha() is None
tm.close()
@@ -2029,18 +1995,14 @@ def test_area_colors(self):
self._check_colors(poly, facecolors=jet_with_alpha)
handles, labels = ax.get_legend_handles_labels()
- if self.mpl_ge_1_5_0:
- linecolors = jet_with_alpha
- else:
- # Line2D can't have alpha in its linecolor
- linecolors = jet_colors
+ linecolors = jet_with_alpha
self._check_colors(handles[:len(jet_colors)], linecolors=linecolors)
for h in handles:
assert h.get_alpha() == 0.5
@pytest.mark.slow
def test_hist_colors(self):
- default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
+ default_colors = self._unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
ax = df.plot.hist()
@@ -2076,8 +2038,6 @@ def test_hist_colors(self):
@td.skip_if_no_scipy
def test_kde_colors(self):
_skip_if_no_scipy_gaussian_kde()
- if not self.mpl_ge_1_5_0:
- pytest.skip("mpl is not supported")
from matplotlib import cm
@@ -2101,11 +2061,9 @@ def test_kde_colors(self):
@td.skip_if_no_scipy
def test_kde_colors_and_styles_subplots(self):
_skip_if_no_scipy_gaussian_kde()
- if not self.mpl_ge_1_5_0:
- pytest.skip("mpl is not supported")
from matplotlib import cm
- default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
+ default_colors = self._unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
@@ -2164,7 +2122,7 @@ def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c='k',
fliers_c=None):
# TODO: outside this func?
if fliers_c is None:
- fliers_c = 'k' if self.mpl_ge_2_0_0 else 'b'
+ fliers_c = 'k'
self._check_colors(bp['boxes'],
linecolors=[box_c] * len(bp['boxes']))
self._check_colors(bp['whiskers'],
@@ -2176,7 +2134,7 @@ def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c='k',
self._check_colors(bp['caps'],
linecolors=[caps_c] * len(bp['caps']))
- default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
+ default_colors = self._unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
bp = df.plot.box(return_type='dict')
@@ -2225,17 +2183,14 @@ def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c='k',
def test_default_color_cycle(self):
import matplotlib.pyplot as plt
+ import cycler
colors = list('rgbk')
- if self.mpl_ge_1_5_0:
- import cycler
- plt.rcParams['axes.prop_cycle'] = cycler.cycler('color', colors)
- else:
- plt.rcParams['axes.color_cycle'] = colors
+ plt.rcParams['axes.prop_cycle'] = cycler.cycler('color', colors)
df = DataFrame(randn(5, 3))
ax = df.plot()
- expected = self._maybe_unpack_cycler(plt.rcParams)[:3]
+ expected = self._unpack_cycler(plt.rcParams)[:3]
self._check_colors(ax.get_lines(), linecolors=expected)
def test_unordered_ts(self):
@@ -2591,19 +2546,12 @@ def test_errorbar_asymmetrical(self):
# each column is [0, 1, 2, 3, 4], [3, 4, 5, 6, 7]...
df = DataFrame(np.arange(15).reshape(3, 5)).T
- data = df.values
ax = df.plot(yerr=err, xerr=err / 2)
- if self.mpl_ge_2_0_0:
- yerr_0_0 = ax.collections[1].get_paths()[0].vertices[:, 1]
- expected_0_0 = err[0, :, 0] * np.array([-1, 1])
- tm.assert_almost_equal(yerr_0_0, expected_0_0)
- else:
- assert ax.lines[7].get_ydata()[0] == data[0, 1] - err[1, 0, 0]
- assert ax.lines[8].get_ydata()[0] == data[0, 1] + err[1, 1, 0]
- assert ax.lines[5].get_xdata()[0] == -err[1, 0, 0] / 2
- assert ax.lines[6].get_xdata()[0] == err[1, 1, 0] / 2
+ yerr_0_0 = ax.collections[1].get_paths()[0].vertices[:, 1]
+ expected_0_0 = err[0, :, 0] * np.array([-1, 1])
+ tm.assert_almost_equal(yerr_0_0, expected_0_0)
with pytest.raises(ValueError):
df.plot(yerr=err.T)
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index 2864877550bac..1d9942603a269 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -122,7 +122,7 @@ def test_hist_no_overlap(self):
subplot(122)
y.hist()
fig = gcf()
- axes = fig.axes if self.mpl_ge_1_5_0 else fig.get_axes()
+ axes = fig.axes
assert len(axes) == 2
@pytest.mark.slow
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index 8c84b785c88e4..54d17a4773749 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -76,10 +76,7 @@ def test_scatter_matrix_axis(self):
axes0_labels = axes[0][0].yaxis.get_majorticklabels()
# GH 5662
- if self.mpl_ge_2_0_0:
- expected = ['-2', '0', '2']
- else:
- expected = ['-2', '-1', '0', '1', '2']
+ expected = ['-2', '0', '2']
self._check_text_labels(axes0_labels, expected)
self._check_ticks_props(
axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
@@ -91,10 +88,7 @@ def test_scatter_matrix_axis(self):
axes = _check_plot_works(scatter_matrix, filterwarnings='always',
frame=df, range_padding=.1)
axes0_labels = axes[0][0].yaxis.get_majorticklabels()
- if self.mpl_ge_2_0_0:
- expected = ['-1.0', '-0.5', '0.0']
- else:
- expected = ['-1.2', '-1.0', '-0.8', '-0.6', '-0.4', '-0.2', '0.0']
+ expected = ['-1.0', '-0.5', '0.0']
self._check_text_labels(axes0_labels, expected)
self._check_ticks_props(
axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 5dc7d52e05778..dc708278836d2 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -88,10 +88,7 @@ def test_plot_figsize_and_title(self):
def test_dont_modify_rcParams(self):
# GH 8242
- if self.mpl_ge_1_5_0:
- key = 'axes.prop_cycle'
- else:
- key = 'axes.color_cycle'
+ key = 'axes.prop_cycle'
colors = self.plt.rcParams[key]
_, ax = self.plt.subplots()
Series([1, 2, 3]).plot(ax=ax)
@@ -211,10 +208,7 @@ def test_line_use_index_false(self):
@pytest.mark.slow
def test_bar_log(self):
- expected = np.array([1., 10., 100., 1000.])
-
- if not self.mpl_le_1_2_1:
- expected = np.hstack((.1, expected, 1e4))
+ expected = np.array([1e-1, 1e0, 1e1, 1e2, 1e3, 1e4])
_, ax = self.plt.subplots()
ax = Series([200, 500]).plot.bar(log=True, ax=ax)
@@ -227,17 +221,12 @@ def test_bar_log(self):
tm.close()
# GH 9905
- expected = np.array([1.0e-03, 1.0e-02, 1.0e-01, 1.0e+00])
-
- if not self.mpl_le_1_2_1:
- expected = np.hstack((1.0e-04, expected, 1.0e+01))
- if self.mpl_ge_2_0_0:
- expected = np.hstack((1.0e-05, expected))
+ expected = np.array([1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1])
_, ax = self.plt.subplots()
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='bar', ax=ax)
- ymin = 0.0007943282347242822 if self.mpl_ge_2_0_0 else 0.001
- ymax = 0.12589254117941673 if self.mpl_ge_2_0_0 else .10000000000000001
+ ymin = 0.0007943282347242822
+ ymax = 0.12589254117941673
res = ax.get_ylim()
tm.assert_almost_equal(res[0], ymin)
tm.assert_almost_equal(res[1], ymax)
@@ -474,7 +463,7 @@ def test_hist_no_overlap(self):
subplot(122)
y.hist()
fig = gcf()
- axes = fig.axes if self.mpl_ge_1_5_0 else fig.get_axes()
+ axes = fig.axes
assert len(axes) == 2
@pytest.mark.slow
@@ -591,8 +580,6 @@ def test_plot_fails_with_dupe_color_and_style(self):
@pytest.mark.slow
@td.skip_if_no_scipy
def test_hist_kde(self):
- if not self.mpl_ge_1_5_0:
- pytest.skip("mpl is not supported")
_, ax = self.plt.subplots()
ax = self.ts.plot.hist(logy=True, ax=ax)
@@ -618,8 +605,6 @@ def test_hist_kde(self):
@td.skip_if_no_scipy
def test_kde_kwargs(self):
_skip_if_no_scipy_gaussian_kde()
- if not self.mpl_ge_1_5_0:
- pytest.skip("mpl is not supported")
sample_points = np.linspace(-100, 100, 20)
_check_plot_works(self.ts.plot.kde, bw_method='scott', ind=20)
@@ -638,8 +623,6 @@ def test_kde_kwargs(self):
@td.skip_if_no_scipy
def test_kde_missing_vals(self):
_skip_if_no_scipy_gaussian_kde()
- if not self.mpl_ge_1_5_0:
- pytest.skip("mpl is not supported")
s = Series(np.random.uniform(size=50))
s[0] = np.nan
@@ -656,22 +639,18 @@ def test_hist_kwargs(self):
self._check_text_labels(ax.yaxis.get_label(), 'Frequency')
tm.close()
- if self.mpl_ge_1_3_1:
- _, ax = self.plt.subplots()
- ax = self.ts.plot.hist(orientation='horizontal', ax=ax)
- self._check_text_labels(ax.xaxis.get_label(), 'Frequency')
- tm.close()
+ _, ax = self.plt.subplots()
+ ax = self.ts.plot.hist(orientation='horizontal', ax=ax)
+ self._check_text_labels(ax.xaxis.get_label(), 'Frequency')
+ tm.close()
- _, ax = self.plt.subplots()
- ax = self.ts.plot.hist(align='left', stacked=True, ax=ax)
- tm.close()
+ _, ax = self.plt.subplots()
+ ax = self.ts.plot.hist(align='left', stacked=True, ax=ax)
+ tm.close()
@pytest.mark.slow
@td.skip_if_no_scipy
def test_hist_kde_color(self):
- if not self.mpl_ge_1_5_0:
- pytest.skip("mpl is not supported")
-
_, ax = self.plt.subplots()
ax = self.ts.plot.hist(logy=True, bins=10, color='b', ax=ax)
self._check_ax_scales(ax, yaxis='log')
@@ -870,10 +849,7 @@ def test_time_series_plot_color_kwargs(self):
def test_time_series_plot_color_with_empty_kwargs(self):
import matplotlib as mpl
- if self.mpl_ge_1_5_0:
- def_colors = self._maybe_unpack_cycler(mpl.rcParams)
- else:
- def_colors = mpl.rcParams['axes.color_cycle']
+ def_colors = self._unpack_cycler(mpl.rcParams)
index = date_range('1/1/2000', periods=12)
s = Series(np.arange(1, 13), index=index)
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 517bb9511552c..d4a204ed265b5 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -11,8 +11,7 @@
import pandas as pd
from pandas import (Series, Categorical, DataFrame, isna, notna,
- bdate_range, date_range, _np_version_under1p10,
- CategoricalIndex)
+ bdate_range, date_range, CategoricalIndex)
from pandas.core.index import MultiIndex
from pandas.core.indexes.datetimes import Timestamp
from pandas.core.indexes.timedeltas import Timedelta
@@ -1246,12 +1245,11 @@ def test_numpy_argmin_deprecated(self):
assert result == 1
- if not _np_version_under1p10:
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- msg = "the 'out' parameter is not supported"
- tm.assert_raises_regex(ValueError, msg, np.argmin,
- s, out=data)
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ msg = "the 'out' parameter is not supported"
+ tm.assert_raises_regex(ValueError, msg, np.argmin,
+ s, out=data)
def test_idxmax(self):
# test idxmax
@@ -1315,12 +1313,11 @@ def test_numpy_argmax_deprecated(self):
assert result == 10
- if not _np_version_under1p10:
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- msg = "the 'out' parameter is not supported"
- tm.assert_raises_regex(ValueError, msg, np.argmax,
- s, out=data)
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ msg = "the 'out' parameter is not supported"
+ tm.assert_raises_regex(ValueError, msg, np.argmax,
+ s, out=data)
def test_ptp(self):
# GH21614
diff --git a/pandas/tests/sparse/series/test_series.py b/pandas/tests/sparse/series/test_series.py
index a1ec8314841e3..1cd7c73337e4f 100644
--- a/pandas/tests/sparse/series/test_series.py
+++ b/pandas/tests/sparse/series/test_series.py
@@ -9,8 +9,8 @@
import numpy as np
import pandas as pd
-from pandas import (Series, DataFrame, bdate_range,
- isna, compat, _np_version_under1p12)
+
+from pandas import Series, DataFrame, bdate_range, isna, compat
from pandas.errors import PerformanceWarning
from pandas.tseries.offsets import BDay
import pandas.util.testing as tm
@@ -559,17 +559,16 @@ def test_numpy_take(self):
sp = SparseSeries([1.0, 2.0, 3.0])
indices = [1, 2]
- if not _np_version_under1p12:
- tm.assert_series_equal(np.take(sp, indices, axis=0).to_dense(),
- np.take(sp.to_dense(), indices, axis=0))
+ tm.assert_series_equal(np.take(sp, indices, axis=0).to_dense(),
+ np.take(sp.to_dense(), indices, axis=0))
- msg = "the 'out' parameter is not supported"
- tm.assert_raises_regex(ValueError, msg, np.take,
- sp, indices, out=np.empty(sp.shape))
+ msg = "the 'out' parameter is not supported"
+ tm.assert_raises_regex(ValueError, msg, np.take,
+ sp, indices, out=np.empty(sp.shape))
- msg = "the 'mode' parameter is not supported"
- tm.assert_raises_regex(ValueError, msg, np.take,
- sp, indices, out=None, mode='clip')
+ msg = "the 'mode' parameter is not supported"
+ tm.assert_raises_regex(ValueError, msg, np.take,
+ sp, indices, out=None, mode='clip')
def test_setitem(self):
self.bseries[5] = 7.
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index d2b7979aed98d..1fd801c68fdde 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -224,10 +224,6 @@ def test_factorize_tuple_list(self, data, expected_label, expected_level):
def test_complex_sorting(self):
# gh 12666 - check no segfault
- # Test not valid numpy versions older than 1.11
- if pd._np_version_under1p11:
- pytest.skip("Test valid only for numpy 1.11+")
-
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index c101fd25ce5e5..a7b9bf9c9a351 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -13,7 +13,7 @@
from pandas.core.api import DataFrame, Panel
from pandas.core.computation import expressions as expr
-from pandas import compat, _np_version_under1p11, _np_version_under1p13
+from pandas import compat, _np_version_under1p13
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assert_panel_equal)
from pandas.io.formats.printing import pprint_thing
@@ -272,10 +272,7 @@ def testit():
for op, op_str in [('add', '+'), ('sub', '-'), ('mul', '*'),
('div', '/'), ('pow', '**')]:
- # numpy >= 1.11 doesn't handle integers
- # raised to integer powers
- # https://github.com/pandas-dev/pandas/issues/15363
- if op == 'pow' and not _np_version_under1p11:
+ if op == 'pow':
continue
if op == 'div':
diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py
index 98026f6d4cf0e..aa5d0016eca95 100644
--- a/pandas/tests/test_sorting.py
+++ b/pandas/tests/test_sorting.py
@@ -7,8 +7,7 @@
import numpy as np
from numpy import nan
from pandas.core import common as com
-from pandas import (DataFrame, MultiIndex, merge, concat, Series, compat,
- _np_version_under1p10)
+from pandas import DataFrame, MultiIndex, merge, concat, Series, compat
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.core.sorting import (is_int64_overflow_possible,
@@ -416,7 +415,7 @@ def test_mixed_integer_from_list(self):
def test_unsortable(self):
# GH 13714
arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object)
- if compat.PY2 and not _np_version_under1p10:
+ if compat.PY2:
# RuntimeWarning: tp_compare didn't return -1 or -2 for exception
with warnings.catch_warnings():
pytest.raises(TypeError, safe_sort, arr)
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index cc663fc59cbf1..4b0c4d581a008 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -7,7 +7,6 @@
from datetime import datetime, timedelta
from numpy.random import randn
import numpy as np
-from pandas import _np_version_under1p12
import pandas as pd
from pandas import (Series, DataFrame, bdate_range,
@@ -1292,8 +1291,6 @@ def test_rolling_quantile_np_percentile(self):
tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))
- @pytest.mark.skipif(_np_version_under1p12,
- reason='numpy midpoint interpolation is broken')
@pytest.mark.parametrize('quantile', [0.0, 0.1, 0.45, 0.5, 1])
@pytest.mark.parametrize('interpolation', ['linear', 'lower', 'higher',
'nearest', 'midpoint'])
diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py
index 5d7b23894e745..2fe891346065d 100644
--- a/pandas/util/_test_decorators.py
+++ b/pandas/util/_test_decorators.py
@@ -78,17 +78,6 @@ def _skip_if_no_mpl():
return True
-def _skip_if_mpl_1_5():
- mod = safe_import("matplotlib")
-
- if mod:
- v = mod.__version__
- if LooseVersion(v) > LooseVersion('1.4.3') or str(v)[0] == '0':
- return True
- else:
- mod.use("Agg", warn=False)
-
-
def _skip_if_mpl_2_2():
mod = safe_import("matplotlib")
@@ -164,8 +153,6 @@ def decorated_func(func):
reason="NumPy 1.15 or greater required")
skip_if_mpl = pytest.mark.skipif(not _skip_if_no_mpl(),
reason="matplotlib is present")
-skip_if_mpl_1_5 = pytest.mark.skipif(_skip_if_mpl_1_5(),
- reason="matplotlib 1.5")
xfail_if_mpl_2_2 = pytest.mark.xfail(_skip_if_mpl_2_2(),
reason="matplotlib 2.2")
skip_if_32bit = pytest.mark.skipif(is_platform_32bit(),
diff --git a/setup.py b/setup.py
index bfd0c50c9e9be..f31aaa7e79a0d 100755
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@ def is_platform_windows():
return sys.platform == 'win32' or sys.platform == 'cygwin'
-min_numpy_ver = '1.9.0'
+min_numpy_ver = '1.12.0'
setuptools_kwargs = {
'install_requires': [
'python-dateutil >= 2.5.0',
| - [x] closes #21242
- [x] tests modified / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Spurred on also partly by compat failure of #22725. ;-)
| https://api.github.com/repos/pandas-dev/pandas/pulls/23062 | 2018-10-09T18:25:59Z | 2018-10-15T17:07:34Z | 2018-10-15T17:07:33Z | 2018-10-15T18:28:01Z |
REF: collect ops dispatch functions in one place, try to de-duplicate SparseDataFrame methods | diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index f29b4410fbf54..2335b26c576eb 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -943,6 +943,134 @@ def should_series_dispatch(left, right, op):
return False
+def dispatch_to_series(left, right, func, str_rep=None, axis=None):
+ """
+ Evaluate the frame operation func(left, right) by evaluating
+ column-by-column, dispatching to the Series implementation.
+
+ Parameters
+ ----------
+ left : DataFrame
+ right : scalar or DataFrame
+ func : arithmetic or comparison operator
+ str_rep : str or None, default None
+ axis : {None, 0, 1, "index", "columns"}
+
+ Returns
+ -------
+ DataFrame
+ """
+ # Note: we use iloc to access columns for compat with cases
+ # with non-unique columns.
+ import pandas.core.computation.expressions as expressions
+
+ right = lib.item_from_zerodim(right)
+ if lib.is_scalar(right):
+
+ def column_op(a, b):
+ return {i: func(a.iloc[:, i], b)
+ for i in range(len(a.columns))}
+
+ elif isinstance(right, ABCDataFrame):
+ assert right._indexed_same(left)
+
+ def column_op(a, b):
+ return {i: func(a.iloc[:, i], b.iloc[:, i])
+ for i in range(len(a.columns))}
+
+ elif isinstance(right, ABCSeries) and axis == "columns":
+ # We only get here if called via left._combine_match_columns,
+ # in which case we specifically want to operate row-by-row
+ assert right.index.equals(left.columns)
+
+ def column_op(a, b):
+ return {i: func(a.iloc[:, i], b.iloc[i])
+ for i in range(len(a.columns))}
+
+ elif isinstance(right, ABCSeries):
+ assert right.index.equals(left.index) # Handle other cases later
+
+ def column_op(a, b):
+ return {i: func(a.iloc[:, i], b)
+ for i in range(len(a.columns))}
+
+ else:
+ # Remaining cases have less-obvious dispatch rules
+ raise NotImplementedError(right)
+
+ new_data = expressions.evaluate(column_op, str_rep, left, right)
+
+ result = left._constructor(new_data, index=left.index, copy=False)
+ # Pin columns instead of passing to constructor for compat with
+ # non-unique columns case
+ result.columns = left.columns
+ return result
+
+
+def dispatch_to_index_op(op, left, right, index_class):
+ """
+ Wrap Series left in the given index_class to delegate the operation op
+ to the index implementation. DatetimeIndex and TimedeltaIndex perform
+ type checking, timezone handling, overflow checks, etc.
+
+ Parameters
+ ----------
+ op : binary operator (operator.add, operator.sub, ...)
+ left : Series
+ right : object
+ index_class : DatetimeIndex or TimedeltaIndex
+
+ Returns
+ -------
+ result : object, usually DatetimeIndex, TimedeltaIndex, or Series
+ """
+ left_idx = index_class(left)
+
+ # avoid accidentally allowing integer add/sub. For datetime64[tz] dtypes,
+ # left_idx may inherit a freq from a cached DatetimeIndex.
+ # See discussion in GH#19147.
+ if getattr(left_idx, 'freq', None) is not None:
+ left_idx = left_idx._shallow_copy(freq=None)
+ try:
+ result = op(left_idx, right)
+ except NullFrequencyError:
+ # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError
+ # on add/sub of integers (or int-like). We re-raise as a TypeError.
+ raise TypeError('incompatible type for a datetime/timedelta '
+ 'operation [{name}]'.format(name=op.__name__))
+ return result
+
+
+def dispatch_to_extension_op(op, left, right):
+ """
+ Assume that left or right is a Series backed by an ExtensionArray,
+ apply the operator defined by op.
+ """
+
+ # The op calls will raise TypeError if the op is not defined
+ # on the ExtensionArray
+
+ # unbox Series and Index to arrays
+ if isinstance(left, (ABCSeries, ABCIndexClass)):
+ new_left = left._values
+ else:
+ new_left = left
+
+ if isinstance(right, (ABCSeries, ABCIndexClass)):
+ new_right = right._values
+ else:
+ new_right = right
+
+ res_values = op(new_left, new_right)
+ res_name = get_op_result_name(left, right)
+
+ if op.__name__ in ['divmod', 'rdivmod']:
+ return _construct_divmod_result(
+ left, res_values, left.index, res_name)
+
+ return _construct_result(left, res_values, left.index, res_name)
+
+
# -----------------------------------------------------------------------------
# Functions that add arithmetic methods to objects, given arithmetic factory
# methods
@@ -1202,36 +1330,6 @@ def _construct_divmod_result(left, result, index, name, dtype=None):
)
-def dispatch_to_extension_op(op, left, right):
- """
- Assume that left or right is a Series backed by an ExtensionArray,
- apply the operator defined by op.
- """
-
- # The op calls will raise TypeError if the op is not defined
- # on the ExtensionArray
-
- # unbox Series and Index to arrays
- if isinstance(left, (ABCSeries, ABCIndexClass)):
- new_left = left._values
- else:
- new_left = left
-
- if isinstance(right, (ABCSeries, ABCIndexClass)):
- new_right = right._values
- else:
- new_right = right
-
- res_values = op(new_left, new_right)
- res_name = get_op_result_name(left, right)
-
- if op.__name__ in ['divmod', 'rdivmod']:
- return _construct_divmod_result(
- left, res_values, left.index, res_name)
-
- return _construct_result(left, res_values, left.index, res_name)
-
-
def _arith_method_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
@@ -1329,40 +1427,6 @@ def wrapper(left, right):
return wrapper
-def dispatch_to_index_op(op, left, right, index_class):
- """
- Wrap Series left in the given index_class to delegate the operation op
- to the index implementation. DatetimeIndex and TimedeltaIndex perform
- type checking, timezone handling, overflow checks, etc.
-
- Parameters
- ----------
- op : binary operator (operator.add, operator.sub, ...)
- left : Series
- right : object
- index_class : DatetimeIndex or TimedeltaIndex
-
- Returns
- -------
- result : object, usually DatetimeIndex, TimedeltaIndex, or Series
- """
- left_idx = index_class(left)
-
- # avoid accidentally allowing integer add/sub. For datetime64[tz] dtypes,
- # left_idx may inherit a freq from a cached DatetimeIndex.
- # See discussion in GH#19147.
- if getattr(left_idx, 'freq', None) is not None:
- left_idx = left_idx._shallow_copy(freq=None)
- try:
- result = op(left_idx, right)
- except NullFrequencyError:
- # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError
- # on add/sub of integers (or int-like). We re-raise as a TypeError.
- raise TypeError('incompatible type for a datetime/timedelta '
- 'operation [{name}]'.format(name=op.__name__))
- return result
-
-
def _comp_method_OBJECT_ARRAY(op, x, y):
if isinstance(y, list):
y = construct_1d_object_array_from_listlike(y)
@@ -1661,69 +1725,6 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
# -----------------------------------------------------------------------------
# DataFrame
-def dispatch_to_series(left, right, func, str_rep=None, axis=None):
- """
- Evaluate the frame operation func(left, right) by evaluating
- column-by-column, dispatching to the Series implementation.
-
- Parameters
- ----------
- left : DataFrame
- right : scalar or DataFrame
- func : arithmetic or comparison operator
- str_rep : str or None, default None
- axis : {None, 0, 1, "index", "columns"}
-
- Returns
- -------
- DataFrame
- """
- # Note: we use iloc to access columns for compat with cases
- # with non-unique columns.
- import pandas.core.computation.expressions as expressions
-
- right = lib.item_from_zerodim(right)
- if lib.is_scalar(right):
-
- def column_op(a, b):
- return {i: func(a.iloc[:, i], b)
- for i in range(len(a.columns))}
-
- elif isinstance(right, ABCDataFrame):
- assert right._indexed_same(left)
-
- def column_op(a, b):
- return {i: func(a.iloc[:, i], b.iloc[:, i])
- for i in range(len(a.columns))}
-
- elif isinstance(right, ABCSeries) and axis == "columns":
- # We only get here if called via left._combine_match_columns,
- # in which case we specifically want to operate row-by-row
- assert right.index.equals(left.columns)
-
- def column_op(a, b):
- return {i: func(a.iloc[:, i], b.iloc[i])
- for i in range(len(a.columns))}
-
- elif isinstance(right, ABCSeries):
- assert right.index.equals(left.index) # Handle other cases later
-
- def column_op(a, b):
- return {i: func(a.iloc[:, i], b)
- for i in range(len(a.columns))}
-
- else:
- # Remaining cases have less-obvious dispatch rules
- raise NotImplementedError(right)
-
- new_data = expressions.evaluate(column_op, str_rep, left, right)
-
- result = left._constructor(new_data, index=left.index, copy=False)
- # Pin columns instead of passing to constructor for compat with
- # non-unique columns case
- result.columns = left.columns
- return result
-
def _combine_series_frame(self, other, func, fill_value=None, axis=None,
level=None):
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py
index e46df2b2bde70..c7d8be0d2e9e4 100644
--- a/pandas/core/sparse/frame.py
+++ b/pandas/core/sparse/frame.py
@@ -548,12 +548,12 @@ def xs(self, key, axis=0, copy=False):
# Arithmetic-related methods
def _combine_frame(self, other, func, fill_value=None, level=None):
- this, other = self.align(other, join='outer', level=level, copy=False)
- new_index, new_columns = this.index, this.columns
-
if level is not None:
raise NotImplementedError("'level' argument is not supported")
+ this, other = self.align(other, join='outer', level=level, copy=False)
+ new_index, new_columns = this.index, this.columns
+
if self.empty and other.empty:
return self._constructor(index=new_index).__finalize__(self)
@@ -573,17 +573,7 @@ def _combine_frame(self, other, func, fill_value=None, level=None):
if col in this and col in other:
new_data[col] = func(this[col], other[col])
- # if the fill values are the same use them? or use a valid one
- new_fill_value = None
- other_fill_value = getattr(other, 'default_fill_value', np.nan)
- if self.default_fill_value == other_fill_value:
- new_fill_value = self.default_fill_value
- elif np.isnan(self.default_fill_value) and not np.isnan(
- other_fill_value):
- new_fill_value = other_fill_value
- elif not np.isnan(self.default_fill_value) and np.isnan(
- other_fill_value):
- new_fill_value = self.default_fill_value
+ new_fill_value = self._get_op_result_fill_value(other, func)
return self._constructor(data=new_data, index=new_index,
columns=new_columns,
@@ -596,26 +586,16 @@ def _combine_match_index(self, other, func, level=None):
if level is not None:
raise NotImplementedError("'level' argument is not supported")
- new_index = self.index.union(other.index)
- this = self
- if self.index is not new_index:
- this = self.reindex(new_index)
-
- if other.index is not new_index:
- other = other.reindex(new_index)
+ this, other = self.align(other, join='outer', axis=0, level=level,
+ copy=False)
for col, series in compat.iteritems(this):
new_data[col] = func(series.values, other.values)
- # fill_value is a function of our operator
- if isna(other.fill_value) or isna(self.default_fill_value):
- fill_value = np.nan
- else:
- fill_value = func(np.float64(self.default_fill_value),
- np.float64(other.fill_value))
+ fill_value = self._get_op_result_fill_value(other, func)
return self._constructor(
- new_data, index=new_index, columns=self.columns,
+ new_data, index=this.index, columns=self.columns,
default_fill_value=fill_value).__finalize__(self)
def _combine_match_columns(self, other, func, level=None):
@@ -627,24 +607,56 @@ def _combine_match_columns(self, other, func, level=None):
if level is not None:
raise NotImplementedError("'level' argument is not supported")
- new_data = {}
-
- union = intersection = self.columns
+ left, right = self.align(other, join='outer', axis=1, level=level,
+ copy=False)
+ assert left.columns.equals(right.index)
- if not union.equals(other.index):
- union = other.index.union(self.columns)
- intersection = other.index.intersection(self.columns)
+ new_data = {}
- for col in intersection:
- new_data[col] = func(self[col], float(other[col]))
+ for col in left.columns:
+ new_data[col] = func(left[col], float(right[col]))
return self._constructor(
- new_data, index=self.index, columns=union,
+ new_data, index=left.index, columns=left.columns,
default_fill_value=self.default_fill_value).__finalize__(self)
def _combine_const(self, other, func, errors='raise'):
return self._apply_columns(lambda x: func(x, other))
+ def _get_op_result_fill_value(self, other, func):
+ own_default = self.default_fill_value
+
+ if isinstance(other, DataFrame):
+ # i.e. called from _combine_frame
+
+ other_default = getattr(other, 'default_fill_value', np.nan)
+
+ # if the fill values are the same use them? or use a valid one
+ if own_default == other_default:
+                # TODO: won't this evaluate as False if both are np.nan?
+ fill_value = own_default
+ elif np.isnan(own_default) and not np.isnan(other_default):
+ fill_value = other_default
+ elif not np.isnan(own_default) and np.isnan(other_default):
+ fill_value = own_default
+ else:
+ fill_value = None
+
+ elif isinstance(other, SparseSeries):
+ # i.e. called from _combine_match_index
+
+ # fill_value is a function of our operator
+ if isna(other.fill_value) or isna(own_default):
+ fill_value = np.nan
+ else:
+ fill_value = func(np.float64(own_default),
+ np.float64(other.fill_value))
+
+ else:
+ raise NotImplementedError(type(other))
+
+ return fill_value
+
def _reindex_index(self, index, method, copy, level, fill_value=np.nan,
limit=None, takeable=False):
if level is not None:
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index 71eba1e6901a1..5435ec643f813 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -677,6 +677,51 @@ def test_dt64ser_sub_datetime_dtype(self):
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
+ def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
+        # these are all TypeError ops
+ op_str = all_arithmetic_operators
+
+ def check(get_ser, test_ser):
+
+ # check that we are getting a TypeError
+ # with 'operate' (from core/ops.py) for the ops that are not
+ # defined
+ op = getattr(get_ser, op_str, None)
+ with tm.assert_raises_regex(TypeError, 'operate|cannot'):
+ op(test_ser)
+
+ # ## timedelta64 ###
+ td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
+ td1.iloc[2] = np.nan
+
+ # ## datetime64 ###
+ dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
+ Timestamp('20120103')])
+ dt1.iloc[2] = np.nan
+ dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
+ Timestamp('20120104')])
+ if op_str not in ['__sub__', '__rsub__']:
+ check(dt1, dt2)
+
+        # ## datetime64 with timedelta ###
+ # TODO(jreback) __rsub__ should raise?
+ if op_str not in ['__add__', '__radd__', '__sub__']:
+ check(dt1, td1)
+
+ # 8260, 10763
+ # datetime64 with tz
+ tz = 'US/Eastern'
+ dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
+ tz=tz), name='foo')
+ dt2 = dt1.copy()
+ dt2.iloc[2] = np.nan
+ td1 = Series(pd.timedelta_range('1 days 1 min', periods=5, freq='H'))
+ td2 = td1.copy()
+ td2.iloc[1] = np.nan
+
+ if op_str not in ['__add__', '__radd__', '__sub__', '__rsub__']:
+ check(dt2, td2)
+
@pytest.mark.parametrize('klass', [Series, pd.Index])
def test_sub_datetime64_not_ns(self, klass):
# GH#7996
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 8156c5ea671c2..b71af4b777022 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
+from collections import deque
+from datetime import datetime
import operator
import pytest
@@ -16,28 +18,86 @@
# Comparisons
class TestFrameComparisons(object):
- def test_flex_comparison_nat(self):
- # GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
- # and _definitely_ not be NaN
- df = pd.DataFrame([pd.NaT])
-
- result = df == pd.NaT
- # result.iloc[0, 0] is a np.bool_ object
- assert result.iloc[0, 0].item() is False
-
- result = df.eq(pd.NaT)
- assert result.iloc[0, 0].item() is False
-
- result = df != pd.NaT
- assert result.iloc[0, 0].item() is True
-
- result = df.ne(pd.NaT)
- assert result.iloc[0, 0].item() is True
+ # Specifically _not_ flex-comparisons
+
+ def test_comparison_invalid(self):
+
+ def check(df, df2):
+
+ for (x, y) in [(df, df2), (df2, df)]:
+ # we expect the result to match Series comparisons for
+ # == and !=, inequalities should raise
+ result = x == y
+ expected = pd.DataFrame({col: x[col] == y[col]
+ for col in x.columns},
+ index=x.index, columns=x.columns)
+ tm.assert_frame_equal(result, expected)
+
+ result = x != y
+ expected = pd.DataFrame({col: x[col] != y[col]
+ for col in x.columns},
+ index=x.index, columns=x.columns)
+ tm.assert_frame_equal(result, expected)
+
+ with pytest.raises(TypeError):
+ x >= y
+ with pytest.raises(TypeError):
+ x > y
+ with pytest.raises(TypeError):
+ x < y
+ with pytest.raises(TypeError):
+ x <= y
+
+ # GH4968
+ # invalid date/int comparisons
+ df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=['a'])
+ df['dates'] = pd.date_range('20010101', periods=len(df))
+
+ df2 = df.copy()
+ df2['dates'] = df['a']
+ check(df, df2)
+
+ df = pd.DataFrame(np.random.randint(10, size=(10, 2)),
+ columns=['a', 'b'])
+ df2 = pd.DataFrame({'a': pd.date_range('20010101', periods=len(df)),
+ 'b': pd.date_range('20100101', periods=len(df))})
+ check(df, df2)
+
+ def test_timestamp_compare(self):
+ # make sure we can compare Timestamps on the right AND left hand side
+ # GH#4982
+ df = pd. DataFrame({'dates1': pd.date_range('20010101', periods=10),
+ 'dates2': pd.date_range('20010102', periods=10),
+ 'intcol': np.random.randint(1000000000, size=10),
+ 'floatcol': np.random.randn(10),
+ 'stringcol': list(tm.rands(10))})
+ df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT
+ ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
+ 'ne': 'ne'}
+
+ for left, right in ops.items():
+ left_f = getattr(operator, left)
+ right_f = getattr(operator, right)
+
+ # no nats
+ if left in ['eq', 'ne']:
+ expected = left_f(df, pd.Timestamp('20010109'))
+ result = right_f(pd.Timestamp('20010109'), df)
+ tm.assert_frame_equal(result, expected)
+ else:
+ with pytest.raises(TypeError):
+ left_f(df, pd.Timestamp('20010109'))
+ with pytest.raises(TypeError):
+ right_f(pd.Timestamp('20010109'), df)
+ # nats
+ expected = left_f(df, pd.Timestamp('nat'))
+ result = right_f(pd.Timestamp('nat'), df)
+ tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
- # GH 13128, GH 22163 != datetime64 vs non-dt64 should be False,
+ # GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
- # (this appears to be fixed before #22163, not sure when)
+ # (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([['1989-08-01', 1], ['1989-08-01', 2]])
other = pd.DataFrame([['a', 'b'], ['c', 'd']])
@@ -80,6 +140,137 @@ def test_df_string_comparison(self):
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
+
+class TestFrameFlexComparisons(object):
+ # TODO: test_bool_flex_frame needs a better name
+ def test_bool_flex_frame(self):
+ data = np.random.randn(5, 3)
+ other_data = np.random.randn(5, 3)
+ df = pd.DataFrame(data)
+ other = pd.DataFrame(other_data)
+ ndim_5 = np.ones(df.shape + (1, 3))
+
+ # Unaligned
+ def _check_unaligned_frame(meth, op, df, other):
+ part_o = other.loc[3:, 1:].copy()
+ rs = meth(part_o)
+ xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
+ tm.assert_frame_equal(rs, xp)
+
+ # DataFrame
+ assert df.eq(df).values.all()
+ assert not df.ne(df).values.any()
+ for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
+ f = getattr(df, op)
+ o = getattr(operator, op)
+ # No NAs
+ tm.assert_frame_equal(f(other), o(df, other))
+ _check_unaligned_frame(f, o, df, other)
+ # ndarray
+ tm.assert_frame_equal(f(other.values), o(df, other.values))
+ # scalar
+ tm.assert_frame_equal(f(0), o(df, 0))
+ # NAs
+ msg = "Unable to coerce to Series/DataFrame"
+ tm.assert_frame_equal(f(np.nan), o(df, np.nan))
+ with tm.assert_raises_regex(ValueError, msg):
+ f(ndim_5)
+
+ # Series
+ def _test_seq(df, idx_ser, col_ser):
+ idx_eq = df.eq(idx_ser, axis=0)
+ col_eq = df.eq(col_ser)
+ idx_ne = df.ne(idx_ser, axis=0)
+ col_ne = df.ne(col_ser)
+ tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
+ tm.assert_frame_equal(col_eq, -col_ne)
+ tm.assert_frame_equal(idx_eq, -idx_ne)
+ tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
+ tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
+ tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
+ tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
+
+ idx_gt = df.gt(idx_ser, axis=0)
+ col_gt = df.gt(col_ser)
+ idx_le = df.le(idx_ser, axis=0)
+ col_le = df.le(col_ser)
+
+ tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
+ tm.assert_frame_equal(col_gt, -col_le)
+ tm.assert_frame_equal(idx_gt, -idx_le)
+ tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
+
+ idx_ge = df.ge(idx_ser, axis=0)
+ col_ge = df.ge(col_ser)
+ idx_lt = df.lt(idx_ser, axis=0)
+ col_lt = df.lt(col_ser)
+ tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
+ tm.assert_frame_equal(col_ge, -col_lt)
+ tm.assert_frame_equal(idx_ge, -idx_lt)
+ tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
+
+ idx_ser = pd.Series(np.random.randn(5))
+ col_ser = pd.Series(np.random.randn(3))
+ _test_seq(df, idx_ser, col_ser)
+
+ # list/tuple
+ _test_seq(df, idx_ser.values, col_ser.values)
+
+ # NA
+ df.loc[0, 0] = np.nan
+ rs = df.eq(df)
+ assert not rs.loc[0, 0]
+ rs = df.ne(df)
+ assert rs.loc[0, 0]
+ rs = df.gt(df)
+ assert not rs.loc[0, 0]
+ rs = df.lt(df)
+ assert not rs.loc[0, 0]
+ rs = df.ge(df)
+ assert not rs.loc[0, 0]
+ rs = df.le(df)
+ assert not rs.loc[0, 0]
+
+ # complex
+ arr = np.array([np.nan, 1, 6, np.nan])
+ arr2 = np.array([2j, np.nan, 7, None])
+ df = pd.DataFrame({'a': arr})
+ df2 = pd.DataFrame({'a': arr2})
+ rs = df.gt(df2)
+ assert not rs.values.any()
+ rs = df.ne(df2)
+ assert rs.values.all()
+
+ arr3 = np.array([2j, np.nan, None])
+ df3 = pd.DataFrame({'a': arr3})
+ rs = df3.gt(2j)
+ assert not rs.values.any()
+
+ # corner, dtype=object
+ df1 = pd.DataFrame({'col': ['foo', np.nan, 'bar']})
+ df2 = pd.DataFrame({'col': ['foo', datetime.now(), 'bar']})
+ result = df1.ne(df2)
+ exp = pd.DataFrame({'col': [False, True, False]})
+ tm.assert_frame_equal(result, exp)
+
+ def test_flex_comparison_nat(self):
+ # GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
+ # and _definitely_ not be NaN
+ df = pd.DataFrame([pd.NaT])
+
+ result = df == pd.NaT
+ # result.iloc[0, 0] is a np.bool_ object
+ assert result.iloc[0, 0].item() is False
+
+ result = df.eq(pd.NaT)
+ assert result.iloc[0, 0].item() is False
+
+ result = df != pd.NaT
+ assert result.iloc[0, 0].item() is True
+
+ result = df.ne(pd.NaT)
+ assert result.iloc[0, 0].item() is True
+
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
@@ -380,3 +571,82 @@ def test_td64_df_add_int_frame(self):
df - other
with pytest.raises(TypeError):
other - df
+
+ def test_arith_mixed(self):
+
+ left = pd.DataFrame({'A': ['a', 'b', 'c'],
+ 'B': [1, 2, 3]})
+
+ result = left + left
+ expected = pd.DataFrame({'A': ['aa', 'bb', 'cc'],
+ 'B': [2, 4, 6]})
+ tm.assert_frame_equal(result, expected)
+
+ def test_arith_getitem_commute(self):
+ df = pd.DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
+
+ def _test_op(df, op):
+ result = op(df, 1)
+
+ if not df.columns.is_unique:
+ raise ValueError("Only unique columns supported by this test")
+
+ for col in result.columns:
+ tm.assert_series_equal(result[col], op(df[col], 1))
+
+ _test_op(df, operator.add)
+ _test_op(df, operator.sub)
+ _test_op(df, operator.mul)
+ _test_op(df, operator.truediv)
+ _test_op(df, operator.floordiv)
+ _test_op(df, operator.pow)
+
+ _test_op(df, lambda x, y: y + x)
+ _test_op(df, lambda x, y: y - x)
+ _test_op(df, lambda x, y: y * x)
+ _test_op(df, lambda x, y: y / x)
+ _test_op(df, lambda x, y: y ** x)
+
+ _test_op(df, lambda x, y: x + y)
+ _test_op(df, lambda x, y: x - y)
+ _test_op(df, lambda x, y: x * y)
+ _test_op(df, lambda x, y: x / y)
+ _test_op(df, lambda x, y: x ** y)
+
+ @pytest.mark.parametrize('values', [[1, 2], (1, 2), np.array([1, 2]),
+ range(1, 3), deque([1, 2])])
+ def test_arith_alignment_non_pandas_object(self, values):
+ # GH#17901
+ df = pd.DataFrame({'A': [1, 1], 'B': [1, 1]})
+ expected = pd.DataFrame({'A': [2, 2], 'B': [3, 3]})
+ result = df + values
+ tm.assert_frame_equal(result, expected)
+
+ def test_arith_non_pandas_object(self):
+ df = pd.DataFrame(np.arange(1, 10, dtype='f8').reshape(3, 3),
+ columns=['one', 'two', 'three'],
+ index=['a', 'b', 'c'])
+
+ val1 = df.xs('a').values
+ added = pd.DataFrame(df.values + val1,
+ index=df.index, columns=df.columns)
+ tm.assert_frame_equal(df + val1, added)
+
+ added = pd.DataFrame((df.values.T + val1).T,
+ index=df.index, columns=df.columns)
+ tm.assert_frame_equal(df.add(val1, axis=0), added)
+
+ val2 = list(df['two'])
+
+ added = pd.DataFrame(df.values + val2,
+ index=df.index, columns=df.columns)
+ tm.assert_frame_equal(df + val2, added)
+
+ added = pd.DataFrame((df.values.T + val2).T, index=df.index,
+ columns=df.columns)
+ tm.assert_frame_equal(df.add(val2, axis='index'), added)
+
+ val3 = np.random.rand(*df.shape)
+ added = pd.DataFrame(df.values + val3,
+ index=df.index, columns=df.columns)
+ tm.assert_frame_equal(df.add(val3), added)
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index 20ca4bc7de43e..65459735e639b 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -1,8 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import print_function
-from collections import deque
-from datetime import datetime
from decimal import Decimal
import operator
@@ -13,8 +11,7 @@
from pandas.compat import range
from pandas import compat
-from pandas import (DataFrame, Series, MultiIndex, Timestamp,
- date_range)
+from pandas import DataFrame, Series, MultiIndex
import pandas.core.common as com
import pandas as pd
@@ -243,75 +240,6 @@ def test_operators_none_as_na(self, op):
result = op(df.fillna(7), df)
assert_frame_equal(result, expected, check_dtype=False)
- def test_comparison_invalid(self):
-
- def check(df, df2):
-
- for (x, y) in [(df, df2), (df2, df)]:
- # we expect the result to match Series comparisons for
- # == and !=, inequalities should raise
- result = x == y
- expected = DataFrame({col: x[col] == y[col]
- for col in x.columns},
- index=x.index, columns=x.columns)
- assert_frame_equal(result, expected)
-
- result = x != y
- expected = DataFrame({col: x[col] != y[col]
- for col in x.columns},
- index=x.index, columns=x.columns)
- assert_frame_equal(result, expected)
-
- pytest.raises(TypeError, lambda: x >= y)
- pytest.raises(TypeError, lambda: x > y)
- pytest.raises(TypeError, lambda: x < y)
- pytest.raises(TypeError, lambda: x <= y)
-
- # GH4968
- # invalid date/int comparisons
- df = DataFrame(np.random.randint(10, size=(10, 1)), columns=['a'])
- df['dates'] = date_range('20010101', periods=len(df))
-
- df2 = df.copy()
- df2['dates'] = df['a']
- check(df, df2)
-
- df = DataFrame(np.random.randint(10, size=(10, 2)), columns=['a', 'b'])
- df2 = DataFrame({'a': date_range('20010101', periods=len(
- df)), 'b': date_range('20100101', periods=len(df))})
- check(df, df2)
-
- def test_timestamp_compare(self):
- # make sure we can compare Timestamps on the right AND left hand side
- # GH4982
- df = DataFrame({'dates1': date_range('20010101', periods=10),
- 'dates2': date_range('20010102', periods=10),
- 'intcol': np.random.randint(1000000000, size=10),
- 'floatcol': np.random.randn(10),
- 'stringcol': list(tm.rands(10))})
- df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT
- ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
- 'ne': 'ne'}
-
- for left, right in ops.items():
- left_f = getattr(operator, left)
- right_f = getattr(operator, right)
-
- # no nats
- if left in ['eq', 'ne']:
- expected = left_f(df, Timestamp('20010109'))
- result = right_f(Timestamp('20010109'), df)
- assert_frame_equal(result, expected)
- else:
- with pytest.raises(TypeError):
- left_f(df, Timestamp('20010109'))
- with pytest.raises(TypeError):
- right_f(Timestamp('20010109'), df)
- # nats
- expected = left_f(df, Timestamp('nat'))
- result = right_f(Timestamp('nat'), df)
- assert_frame_equal(result, expected)
-
@pytest.mark.parametrize('op,res', [('__eq__', False),
('__ne__', True)])
# TODO: not sure what's correct here.
@@ -385,158 +313,6 @@ def test_binary_ops_align(self):
for res in [res3, res4, res5, res6]:
assert_frame_equal(res, exp)
- def test_arith_mixed(self):
-
- left = DataFrame({'A': ['a', 'b', 'c'],
- 'B': [1, 2, 3]})
-
- result = left + left
- expected = DataFrame({'A': ['aa', 'bb', 'cc'],
- 'B': [2, 4, 6]})
- assert_frame_equal(result, expected)
-
- def test_arith_getitem_commute(self):
- df = DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
-
- self._test_op(df, operator.add)
- self._test_op(df, operator.sub)
- self._test_op(df, operator.mul)
- self._test_op(df, operator.truediv)
- self._test_op(df, operator.floordiv)
- self._test_op(df, operator.pow)
-
- self._test_op(df, lambda x, y: y + x)
- self._test_op(df, lambda x, y: y - x)
- self._test_op(df, lambda x, y: y * x)
- self._test_op(df, lambda x, y: y / x)
- self._test_op(df, lambda x, y: y ** x)
-
- self._test_op(df, lambda x, y: x + y)
- self._test_op(df, lambda x, y: x - y)
- self._test_op(df, lambda x, y: x * y)
- self._test_op(df, lambda x, y: x / y)
- self._test_op(df, lambda x, y: x ** y)
-
- @staticmethod
- def _test_op(df, op):
- result = op(df, 1)
-
- if not df.columns.is_unique:
- raise ValueError("Only unique columns supported by this test")
-
- for col in result.columns:
- assert_series_equal(result[col], op(df[col], 1))
-
- def test_bool_flex_frame(self):
- data = np.random.randn(5, 3)
- other_data = np.random.randn(5, 3)
- df = DataFrame(data)
- other = DataFrame(other_data)
- ndim_5 = np.ones(df.shape + (1, 3))
-
- # Unaligned
- def _check_unaligned_frame(meth, op, df, other):
- part_o = other.loc[3:, 1:].copy()
- rs = meth(part_o)
- xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
- assert_frame_equal(rs, xp)
-
- # DataFrame
- assert df.eq(df).values.all()
- assert not df.ne(df).values.any()
- for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
- f = getattr(df, op)
- o = getattr(operator, op)
- # No NAs
- assert_frame_equal(f(other), o(df, other))
- _check_unaligned_frame(f, o, df, other)
- # ndarray
- assert_frame_equal(f(other.values), o(df, other.values))
- # scalar
- assert_frame_equal(f(0), o(df, 0))
- # NAs
- msg = "Unable to coerce to Series/DataFrame"
- assert_frame_equal(f(np.nan), o(df, np.nan))
- with tm.assert_raises_regex(ValueError, msg):
- f(ndim_5)
-
- # Series
- def _test_seq(df, idx_ser, col_ser):
- idx_eq = df.eq(idx_ser, axis=0)
- col_eq = df.eq(col_ser)
- idx_ne = df.ne(idx_ser, axis=0)
- col_ne = df.ne(col_ser)
- assert_frame_equal(col_eq, df == Series(col_ser))
- assert_frame_equal(col_eq, -col_ne)
- assert_frame_equal(idx_eq, -idx_ne)
- assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
- assert_frame_equal(col_eq, df.eq(list(col_ser)))
- assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0))
- assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
-
- idx_gt = df.gt(idx_ser, axis=0)
- col_gt = df.gt(col_ser)
- idx_le = df.le(idx_ser, axis=0)
- col_le = df.le(col_ser)
-
- assert_frame_equal(col_gt, df > Series(col_ser))
- assert_frame_equal(col_gt, -col_le)
- assert_frame_equal(idx_gt, -idx_le)
- assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
-
- idx_ge = df.ge(idx_ser, axis=0)
- col_ge = df.ge(col_ser)
- idx_lt = df.lt(idx_ser, axis=0)
- col_lt = df.lt(col_ser)
- assert_frame_equal(col_ge, df >= Series(col_ser))
- assert_frame_equal(col_ge, -col_lt)
- assert_frame_equal(idx_ge, -idx_lt)
- assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
-
- idx_ser = Series(np.random.randn(5))
- col_ser = Series(np.random.randn(3))
- _test_seq(df, idx_ser, col_ser)
-
- # list/tuple
- _test_seq(df, idx_ser.values, col_ser.values)
-
- # NA
- df.loc[0, 0] = np.nan
- rs = df.eq(df)
- assert not rs.loc[0, 0]
- rs = df.ne(df)
- assert rs.loc[0, 0]
- rs = df.gt(df)
- assert not rs.loc[0, 0]
- rs = df.lt(df)
- assert not rs.loc[0, 0]
- rs = df.ge(df)
- assert not rs.loc[0, 0]
- rs = df.le(df)
- assert not rs.loc[0, 0]
-
- # complex
- arr = np.array([np.nan, 1, 6, np.nan])
- arr2 = np.array([2j, np.nan, 7, None])
- df = DataFrame({'a': arr})
- df2 = DataFrame({'a': arr2})
- rs = df.gt(df2)
- assert not rs.values.any()
- rs = df.ne(df2)
- assert rs.values.all()
-
- arr3 = np.array([2j, np.nan, None])
- df3 = DataFrame({'a': arr3})
- rs = df3.gt(2j)
- assert not rs.values.any()
-
- # corner, dtype=object
- df1 = DataFrame({'col': ['foo', np.nan, 'bar']})
- df2 = DataFrame({'col': ['foo', datetime.now(), 'bar']})
- result = df1.ne(df2)
- exp = DataFrame({'col': [False, True, False]})
- assert_frame_equal(result, exp)
-
def test_dti_tz_convert_to_utc(self):
base = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz='UTC')
@@ -548,40 +324,6 @@ def test_dti_tz_convert_to_utc(self):
exp = DataFrame({'A': [np.nan, 3, np.nan]}, index=base)
assert_frame_equal(df1 + df2, exp)
- def test_arith_non_pandas_object(self):
- df = self.simple
-
- val1 = df.xs('a').values
- added = DataFrame(df.values + val1, index=df.index,
- columns=df.columns)
- assert_frame_equal(df + val1, added)
-
- added = DataFrame((df.values.T + val1).T,
- index=df.index, columns=df.columns)
- assert_frame_equal(df.add(val1, axis=0), added)
-
- val2 = list(df['two'])
-
- added = DataFrame(df.values + val2, index=df.index, columns=df.columns)
- assert_frame_equal(df + val2, added)
-
- added = DataFrame((df.values.T + val2).T, index=df.index,
- columns=df.columns)
- assert_frame_equal(df.add(val2, axis='index'), added)
-
- val3 = np.random.rand(*df.shape)
- added = DataFrame(df.values + val3, index=df.index, columns=df.columns)
- assert_frame_equal(df.add(val3), added)
-
- @pytest.mark.parametrize('values', [[1, 2], (1, 2), np.array([1, 2]),
- range(1, 3), deque([1, 2])])
- def test_arith_alignment_non_pandas_object(self, values):
- # GH 17901
- df = DataFrame({'A': [1, 1], 'B': [1, 1]})
- expected = DataFrame({'A': [2, 2], 'B': [3, 3]})
- result = df + values
- assert_frame_equal(result, expected)
-
def test_combineFrame(self):
frame_copy = self.frame.reindex(self.frame.index[::2])
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index 7ee78645fe96e..e781488a799ec 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -1,18 +1,159 @@
# -*- coding: utf-8 -*-
import operator
+from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
-from pandas import Series
+from pandas import Series, compat
+from pandas.core.indexes.period import IncompatibleFrequency
+
+
+def _permute(obj):
+ return obj.take(np.random.permutation(len(obj)))
+
+
+class TestSeriesFlexArithmetic(object):
+ @pytest.mark.parametrize(
+ 'ts',
+ [
+ (lambda x: x, lambda x: x * 2, False),
+ (lambda x: x, lambda x: x[::2], False),
+ (lambda x: x, lambda x: 5, True),
+ (lambda x: tm.makeFloatSeries(),
+ lambda x: tm.makeFloatSeries(),
+ True)
+ ])
+ @pytest.mark.parametrize('opname', ['add', 'sub', 'mul', 'floordiv',
+ 'truediv', 'div', 'pow'])
+ def test_flex_method_equivalence(self, opname, ts):
+ # check that Series.{opname} behaves like Series.__{opname}__,
+ tser = tm.makeTimeSeries().rename('ts')
+
+ series = ts[0](tser)
+ other = ts[1](tser)
+ check_reverse = ts[2]
+
+ if opname == 'div' and compat.PY3:
+ pytest.skip('div test only for Py3')
+
+ op = getattr(Series, opname)
+
+ if op == 'div':
+ alt = operator.truediv
+ else:
+ alt = getattr(operator, opname)
+
+ result = op(series, other)
+ expected = alt(series, other)
+ tm.assert_almost_equal(result, expected)
+ if check_reverse:
+ rop = getattr(Series, "r" + opname)
+ result = rop(series, other)
+ expected = alt(other, series)
+ tm.assert_almost_equal(result, expected)
+
+
+class TestSeriesArithmetic(object):
+ # Some of these may end up in tests/arithmetic, but are not yet sorted
+
+ def test_empty_series_add_sub(self):
+ # GH#13844
+ a = Series(dtype='M8[ns]')
+ b = Series(dtype='m8[ns]')
+ tm.assert_series_equal(a, a + b)
+ tm.assert_series_equal(a, a - b)
+ tm.assert_series_equal(a, b + a)
+ with pytest.raises(TypeError):
+ b - a
+
+ def test_add_series_with_period_index(self):
+ rng = pd.period_range('1/1/2000', '1/1/2010', freq='A')
+ ts = Series(np.random.randn(len(rng)), index=rng)
+
+ result = ts + ts[::2]
+ expected = ts + ts
+ expected[1::2] = np.nan
+ tm.assert_series_equal(result, expected)
+
+ result = ts + _permute(ts[::2])
+ tm.assert_series_equal(result, expected)
+
+ msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
+ with tm.assert_raises_regex(IncompatibleFrequency, msg):
+ ts + ts.asfreq('D', how="end")
+
+ def test_operators_datetimelike(self):
+
+ # ## timedelta64 ###
+ td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
+ td1.iloc[2] = np.nan
+
+ # ## datetime64 ###
+ dt1 = Series([pd.Timestamp('20111230'), pd.Timestamp('20120101'),
+ pd.Timestamp('20120103')])
+ dt1.iloc[2] = np.nan
+ dt2 = Series([pd.Timestamp('20111231'), pd.Timestamp('20120102'),
+ pd.Timestamp('20120104')])
+ dt1 - dt2
+ dt2 - dt1
+
+        # ## datetime64 with timedelta ###
+ dt1 + td1
+ td1 + dt1
+ dt1 - td1
+ # TODO: Decide if this ought to work.
+ # td1 - dt1
+
+        # ## timedelta with datetime64 ###
+ td1 + dt1
+ dt1 + td1
+
# ------------------------------------------------------------------
# Comparisons
+class TestSeriesFlexComparison(object):
+ def test_comparison_flex_basic(self):
+ left = pd.Series(np.random.randn(10))
+ right = pd.Series(np.random.randn(10))
+
+ tm.assert_series_equal(left.eq(right), left == right)
+ tm.assert_series_equal(left.ne(right), left != right)
+ tm.assert_series_equal(left.le(right), left < right)
+ tm.assert_series_equal(left.lt(right), left <= right)
+ tm.assert_series_equal(left.gt(right), left > right)
+ tm.assert_series_equal(left.ge(right), left >= right)
+
+ # axis
+ for axis in [0, None, 'index']:
+ tm.assert_series_equal(left.eq(right, axis=axis), left == right)
+ tm.assert_series_equal(left.ne(right, axis=axis), left != right)
+ tm.assert_series_equal(left.le(right, axis=axis), left < right)
+ tm.assert_series_equal(left.lt(right, axis=axis), left <= right)
+ tm.assert_series_equal(left.gt(right, axis=axis), left > right)
+ tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
+
+ #
+ msg = 'No axis named 1 for object type'
+ for op in ['eq', 'ne', 'le', 'le', 'gt', 'ge']:
+ with tm.assert_raises_regex(ValueError, msg):
+ getattr(left, op)(right, axis=1)
+
class TestSeriesComparison(object):
+ def test_comparison_different_length(self):
+ a = Series(['a', 'b', 'c'])
+ b = Series(['b', 'a'])
+ with pytest.raises(ValueError):
+ a < b
+
+ a = Series([1, 2])
+ b = Series([2, 3, 4])
+ with pytest.raises(ValueError):
+ a == b
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_ser_flex_cmp_return_dtypes(self, opname):
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index 57688c7a3c3ab..082ed5e0f5123 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -13,11 +13,10 @@
import pandas.util.testing as tm
from pandas import (
Categorical, DataFrame, Index, NaT, Series, bdate_range, compat,
- date_range, isna, timedelta_range
+ date_range, isna
)
from pandas.compat import range
from pandas.core import ops
-from pandas.core.indexes.datetimes import Timestamp
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal
)
@@ -589,17 +588,6 @@ def test_nat_comparisons(self, dtype, box, reverse, pair):
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
- def test_comparison_different_length(self):
- a = Series(['a', 'b', 'c'])
- b = Series(['b', 'a'])
- with pytest.raises(ValueError):
- a < b
-
- a = Series([1, 2])
- b = Series([2, 3, 4])
- with pytest.raises(ValueError):
- a == b
-
def test_ne(self):
ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
expected = [True, True, False, True, True]
@@ -638,31 +626,6 @@ def test_comp_ops_df_compat(self):
class TestSeriesFlexComparisonOps(object):
- def test_comparison_flex_basic(self):
- left = pd.Series(np.random.randn(10))
- right = pd.Series(np.random.randn(10))
-
- assert_series_equal(left.eq(right), left == right)
- assert_series_equal(left.ne(right), left != right)
- assert_series_equal(left.le(right), left < right)
- assert_series_equal(left.lt(right), left <= right)
- assert_series_equal(left.gt(right), left > right)
- assert_series_equal(left.ge(right), left >= right)
-
- # axis
- for axis in [0, None, 'index']:
- assert_series_equal(left.eq(right, axis=axis), left == right)
- assert_series_equal(left.ne(right, axis=axis), left != right)
- assert_series_equal(left.le(right, axis=axis), left < right)
- assert_series_equal(left.lt(right, axis=axis), left <= right)
- assert_series_equal(left.gt(right, axis=axis), left > right)
- assert_series_equal(left.ge(right, axis=axis), left >= right)
-
- #
- msg = 'No axis named 1 for object type'
- for op in ['eq', 'ne', 'le', 'le', 'gt', 'ge']:
- with tm.assert_raises_regex(ValueError, msg):
- getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
@@ -709,119 +672,7 @@ def test_comparison_flex_alignment_fill(self):
assert_series_equal(left.gt(right, fill_value=0), exp)
-class TestDatetimeSeriesArithmetic(object):
-
- def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
- # these are all TypeEror ops
- op_str = all_arithmetic_operators
-
- def check(get_ser, test_ser):
-
- # check that we are getting a TypeError
- # with 'operate' (from core/ops.py) for the ops that are not
- # defined
- op = getattr(get_ser, op_str, None)
- with tm.assert_raises_regex(TypeError, 'operate|cannot'):
- op(test_ser)
-
- # ## timedelta64 ###
- td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
- td1.iloc[2] = np.nan
-
- # ## datetime64 ###
- dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
- Timestamp('20120103')])
- dt1.iloc[2] = np.nan
- dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
- Timestamp('20120104')])
- if op_str not in ['__sub__', '__rsub__']:
- check(dt1, dt2)
-
- # ## datetime64 with timetimedelta ###
- # TODO(jreback) __rsub__ should raise?
- if op_str not in ['__add__', '__radd__', '__sub__']:
- check(dt1, td1)
-
- # 8260, 10763
- # datetime64 with tz
- tz = 'US/Eastern'
- dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
- tz=tz), name='foo')
- dt2 = dt1.copy()
- dt2.iloc[2] = np.nan
- td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
- td2 = td1.copy()
- td2.iloc[1] = np.nan
-
- if op_str not in ['__add__', '__radd__', '__sub__', '__rsub__']:
- check(dt2, td2)
-
- def test_operators_datetimelike(self):
-
- # ## timedelta64 ###
- td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
- td1.iloc[2] = np.nan
-
- # ## datetime64 ###
- dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
- Timestamp('20120103')])
- dt1.iloc[2] = np.nan
- dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
- Timestamp('20120104')])
- dt1 - dt2
- dt2 - dt1
-
- # ## datetime64 with timetimedelta ###
- dt1 + td1
- td1 + dt1
- dt1 - td1
- # TODO: Decide if this ought to work.
- # td1 - dt1
-
- # ## timetimedelta with datetime64 ###
- td1 + dt1
- dt1 + td1
-
-
class TestSeriesOperators(TestData):
- @pytest.mark.parametrize(
- 'ts',
- [
- (lambda x: x, lambda x: x * 2, False),
- (lambda x: x, lambda x: x[::2], False),
- (lambda x: x, lambda x: 5, True),
- (lambda x: tm.makeFloatSeries(),
- lambda x: tm.makeFloatSeries(),
- True)
- ])
- @pytest.mark.parametrize('opname', ['add', 'sub', 'mul', 'floordiv',
- 'truediv', 'div', 'pow'])
- def test_op_method(self, opname, ts):
- # check that Series.{opname} behaves like Series.__{opname}__,
- tser = tm.makeTimeSeries().rename('ts')
-
- series = ts[0](tser)
- other = ts[1](tser)
- check_reverse = ts[2]
-
- if opname == 'div' and compat.PY3:
- pytest.skip('div test only for Py3')
-
- op = getattr(Series, opname)
-
- if op == 'div':
- alt = operator.truediv
- else:
- alt = getattr(operator, opname)
-
- result = op(series, other)
- expected = alt(series, other)
- assert_almost_equal(result, expected)
- if check_reverse:
- rop = getattr(Series, "r" + opname)
- result = rop(series, other)
- expected = alt(other, series)
- assert_almost_equal(result, expected)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py
index d80e2fd276407..88a5ff261fbb4 100644
--- a/pandas/tests/series/test_period.py
+++ b/pandas/tests/series/test_period.py
@@ -2,16 +2,11 @@
import pytest
import pandas as pd
-import pandas.core.indexes.period as period
import pandas.util.testing as tm
from pandas import DataFrame, Period, Series, period_range
from pandas.core.arrays import PeriodArray
-def _permute(obj):
- return obj.take(np.random.permutation(len(obj)))
-
-
class TestSeriesPeriod(object):
def setup_method(self, method):
@@ -116,22 +111,6 @@ def test_intercept_astype_object(self):
result = df.values.squeeze()
assert (result[:, 0] == expected.values).all()
- def test_add_series(self):
- rng = period_range('1/1/2000', '1/1/2010', freq='A')
- ts = Series(np.random.randn(len(rng)), index=rng)
-
- result = ts + ts[::2]
- expected = ts + ts
- expected[1::2] = np.nan
- tm.assert_series_equal(result, expected)
-
- result = ts + _permute(ts[::2])
- tm.assert_series_equal(result, expected)
-
- msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
- with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
- ts + ts.asfreq('D', how="end")
-
def test_align_series(self, join_type):
rng = period_range('1/1/2000', '1/1/2010', freq='A')
ts = Series(np.random.randn(len(rng)), index=rng)
diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py
index 497b1aef02897..b46570fcfb1a5 100644
--- a/pandas/tests/series/test_timeseries.py
+++ b/pandas/tests/series/test_timeseries.py
@@ -456,16 +456,6 @@ def test_timeseries_coercion(self):
assert ser.index.is_all_dates
assert isinstance(ser.index, DatetimeIndex)
- def test_empty_series_ops(self):
- # see issue #13844
- a = Series(dtype='M8[ns]')
- b = Series(dtype='m8[ns]')
- assert_series_equal(a, a + b)
- assert_series_equal(a, a - b)
- assert_series_equal(a, b + a)
- with pytest.raises(TypeError):
- b - a
-
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
| https://api.github.com/repos/pandas-dev/pandas/pulls/23060 | 2018-10-09T15:27:43Z | 2018-10-28T13:48:54Z | 2018-10-28T13:48:54Z | 2018-10-28T16:17:52Z | |
Update api.rst | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 073ed8a082a11..ffa240febf731 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -906,7 +906,6 @@ Indexing, iteration
DataFrame.loc
DataFrame.iloc
DataFrame.insert
- DataFrame.insert
DataFrame.__iter__
DataFrame.items
DataFrame.keys
| remove duplicate line for DataFrame.insert(...)
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/23052 | 2018-10-09T01:34:40Z | 2018-10-09T02:14:33Z | 2018-10-09T02:14:33Z | 2018-10-09T05:59:05Z |
DOC: update the pandas.DateTimeArrayMixin.is_month_start docstring | diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 0f07a9cf3c0e0..b16a399c0bbb1 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -998,48 +998,60 @@ def date(self):
'dim',
"The number of days in the month")
daysinmonth = days_in_month
- is_month_start = _field_accessor(
- 'is_month_start',
- 'is_month_start',
- "Logical indicating if first day of month (defined by frequency)")
- is_month_end = _field_accessor(
- 'is_month_end',
- 'is_month_end',
- """
- Indicator for whether the date is the last day of the month.
+ _is_month_doc = """
+ Indicates whether the date is the {first_or_last} day of the month.
Returns
-------
Series or array
- For Series, returns a Series with boolean values. For
- DatetimeIndex, returns a boolean array.
+ For Series, returns a Series with boolean values.
+ For DatetimeIndex, returns a boolean array.
See Also
--------
- is_month_start : Indicator for whether the date is the first day
- of the month.
+ is_month_start : Return a boolean indicating whether the date
+ is the first day of the month.
+ is_month_end : Return a boolean indicating whether the date
+ is the last day of the month.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
- >>> dates = pd.Series(pd.date_range("2018-02-27", periods=3))
- >>> dates
+ >>> s = pd.Series(pd.date_range("2018-02-27", periods=3))
+ >>> s
0 2018-02-27
1 2018-02-28
2 2018-03-01
dtype: datetime64[ns]
- >>> dates.dt.is_month_end
+ >>> s.dt.is_month_start
+ 0 False
+ 1 False
+ 2 True
+ dtype: bool
+ >>> s.dt.is_month_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2018-02-27", periods=3)
+ >>> idx.is_month_start
+ array([False, False, True])
>>> idx.is_month_end
- array([False, True, False], dtype=bool)
- """)
+ array([False, True, False])
+ """
+ is_month_start = _field_accessor(
+ 'is_month_start',
+ 'is_month_start',
+ _is_month_doc.format(first_or_last='first'))
+
+ is_month_end = _field_accessor(
+ 'is_month_end',
+ 'is_month_end',
+ _is_month_doc.format(first_or_last='last'))
+
is_quarter_start = _field_accessor(
'is_quarter_start',
'is_quarter_start',
| - [x] closes #20146
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
should close PR #20146 | https://api.github.com/repos/pandas-dev/pandas/pulls/23051 | 2018-10-09T01:05:35Z | 2018-10-26T09:15:50Z | 2018-10-26T09:15:50Z | 2018-10-26T10:11:36Z |
CLN GH22985 Fixed interpolation with object error message | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 8de52fbfa79f0..ce70e3ce56c08 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6387,7 +6387,9 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
if _maybe_transposed_self._data.get_dtype_counts().get(
'object') == len(_maybe_transposed_self.T):
- raise TypeError("Cannot interpolate with all NaNs.")
+ raise TypeError("Cannot interpolate with all object-dtype columns "
+ "in the DataFrame. Try setting at least one "
+ "column to a numeric dtype.")
# create/use the index
if method == 'linear':
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 136299a4b81be..9d1bd9e9a0234 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -814,6 +814,19 @@ def test_interp_raise_on_only_mixed(self):
with pytest.raises(TypeError):
df.interpolate(axis=1)
+ def test_interp_raise_on_all_object_dtype(self):
+ # GH 22985
+ df = DataFrame({
+ 'A': [1, 2, 3],
+ 'B': [4, 5, 6]},
+ dtype='object')
+ with tm.assert_raises_regex(
+ TypeError,
+ "Cannot interpolate with all object-dtype columns "
+ "in the DataFrame. Try setting at least one "
+ "column to a numeric dtype."):
+ df.interpolate()
+
def test_interp_inplace(self):
df = DataFrame({'a': [1., 2., np.nan, 4.]})
expected = DataFrame({'a': [1., 2., 3., 4.]})
| - [x] closes #22985
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
It looks like the problem with #22985 is mostly the mismatch between the input trigger and the error message, so this is just a tiny hotfix. | https://api.github.com/repos/pandas-dev/pandas/pulls/23044 | 2018-10-08T14:17:39Z | 2018-10-10T12:04:57Z | 2018-10-10T12:04:56Z | 2018-10-10T12:05:01Z |
DOC: Add Comparison with Excel documentation | diff --git a/doc/source/comparison_with_excel.rst b/doc/source/comparison_with_excel.rst
new file mode 100644
index 0000000000000..c3f9ed2997be9
--- /dev/null
+++ b/doc/source/comparison_with_excel.rst
@@ -0,0 +1,121 @@
+.. currentmodule:: pandas
+.. _compare_with_excel:
+
+.. ipython:: python
+ :suppress:
+
+ import pandas as pd
+ import random
+ pd.options.display.max_rows=15
+
+Comparison with Excel
+*********************
+
+Commonly used Excel functionalities
+-----------------------------------
+
+Fill Handle
+~~~~~~~~~~~
+
+Create a series of numbers following a set pattern in a certain set of cells. In
+Excel this would be done by shift+drag after entering the first number or by
+entering the first two or three values and then dragging.
+
+This can be achieved by creating a series and assigning it to the desired cells.
+
+.. ipython:: python
+
+ df = pd.DataFrame({'AAA': [1] * 8, 'BBB': list(range(0, 8))}); df
+
+ series = list(range(1, 5)); series
+
+ df.iloc[2:(5+1)].AAA = series
+
+ df
+
+Filters
+~~~~~~~
+
+Filters can be achieved by using slicing.
+
+The examples filter by 0 on column AAA, and also show how to filter by multiple
+values.
+
+.. ipython:: python
+
+ df[df.AAA == 0]
+
+ df[(df.AAA == 0) | (df.AAA == 2)]
+
+
+Drop Duplicates
+~~~~~~~~~~~~~~~
+
+Another commonly used function is Drop Duplicates. This is directly supported in
+pandas.
+
+.. ipython:: python
+
+ df = pd.DataFrame({"class": ['A', 'A', 'A', 'B', 'C', 'D'], "student_count": [42, 35, 42, 50, 47, 45], "all_pass": ["Yes", "Yes", "Yes", "No", "No", "Yes"]})
+
+ df.drop_duplicates()
+
+ df.drop_duplicates(["class", "student_count"])
+
+
+Pivot Table
+~~~~~~~~~~~
+
+This can be achieved by using ``pandas.pivot_table`` for examples and reference,
+please see `pandas.pivot_table <http://pandas.pydata.org/pandas-docs/stable/generated/pandas.pivot_table.html>`__
+
+
+Formulae
+~~~~~~~~
+
+Let's create a new column "girls_count" and try to compute the number of boys in
+each class.
+
+.. ipython:: python
+
+ df["girls_count"] = [21, 12, 21, 31, 23, 17]; df
+
+ def get_count(row):
+ return row["student_count"] - row["girls_count"]
+
+ df["boys_count"] = df.apply(get_count, axis = 1); df
+
+
+VLOOKUP
+~~~~~~~
+
+.. ipython:: python
+
+ df1 = pd.DataFrame({"keys": [1, 2, 3, 4, 5, 6, 7], "first_names": ["harry", "ron",
+ "hermione", "rubius", "albus", "severus", "luna"]}); df1
+
+ random_names = pd.DataFrame({"surnames": ["hadrid", "malfoy", "lovegood",
+ "dumbledore", "grindelwald", "granger", "weasly", "riddle", "longbottom",
+ "snape"], "keys": [ random.randint(1,7) for x in range(0,10) ]})
+
+ random_names
+
+ random_names.merge(df1, on="keys", how='left')
+
+Adding a row
+~~~~~~~~~~~~
+
+To appended a row, we can just assign values to an index using ``iloc``.
+
+NOTE: If the index already exists, the values in that index will be over written.
+
+.. ipython:: python
+
+ df1.iloc[7] = [8, "tonks"]; df1
+
+
+Search and Replace
+~~~~~~~~~~~~~~~~~~
+
+The ``replace`` method that comes associated with the ``DataFrame`` object can perform
+this function. Please see `pandas.DataFrame.replace <https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.replace.html>`__ for examples.
| closes #22993
Could someone give me more suggestions of Excel functions that are used often?
| https://api.github.com/repos/pandas-dev/pandas/pulls/23042 | 2018-10-08T11:33:42Z | 2018-11-23T03:36:32Z | null | 2018-11-23T03:36:44Z |
Doc: Adds example of exploding lists into columns instead of storing in dataframe cells | diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst
index 79e312ca12833..1da7e38c7bab7 100644
--- a/doc/source/gotchas.rst
+++ b/doc/source/gotchas.rst
@@ -336,3 +336,94 @@ constructors using something similar to the following:
See `the NumPy documentation on byte order
<https://docs.scipy.org/doc/numpy/user/basics.byteswapping.html>`__ for more
details.
+
+
+Alternative to storing lists in DataFrame Cells
+-----------------------------------------------
+Storing nested lists/arrays inside a pandas object should be avoided for performance and memory use reasons. Instead they should be "exploded" into a flat ``DataFrame`` structure.
+
+Example of exploding nested lists into a DataFrame:
+
+.. ipython:: python
+
+ df = pd.DataFrame({'name': ['A.J. Price'] * 3,
+ 'opponent': ['76ers', 'blazers', 'bobcats']},
+ columns=['name','opponent'])
+ df
+
+ nearest_neighbors = [['Zach LaVine', 'Jeremy Lin', 'Nate Robinson', 'Isaia']]*3
+ nearest_neighbors
+
+Create an index with the "parent" columns to be included in the final Dataframe
+
+.. ipython:: python
+
+ df = pd.concat([df[['name','opponent']], pd.DataFrame(nearest_neighbors)], axis=1)
+ df
+
+Transform the column with lists into series, which become columns in a new Dataframe.
+Note that only the index from the original df is retained - Any other columns in the original df are not part of the new df
+
+.. ipython:: python
+
+ df = df.set_index(['name', 'opponent'])
+ df
+
+Stack the new columns as rows; this creates a new index level we'll want to drop in the next step.
+Note that at this point we have a Series, not a Dataframe
+
+.. ipython:: python
+
+ ser = df.stack()
+ ser
+
+ #. Drop the extraneous index level created by the stack
+ ser.reset_index(level=2, drop=True, inplace=True)
+ ser
+
+ #. Create a Dataframe from the Series
+ df = ser.to_frame('nearest_neighbors')
+ df
+
+
+Example of exploding a list embedded in a dataframe:
+
+.. ipython:: python
+
+ df = pd.DataFrame({'name': ['A.J. Price'] * 3,
+ 'opponent': ['76ers', 'blazers', 'bobcats'],
+ 'nearest_neighbors': [['Zach LaVine', 'Jeremy Lin', 'Nate Robinson', 'Isaia']] * 3},
+ columns=['name','opponent','nearest_neighbors'])
+ df
+
+Create an index with the "parent" columns to be included in the final Dataframe
+
+.. ipython:: python
+
+ df = df.set_index(['name', 'opponent'])
+ df
+
+Transform the column with lists into series, which become columns in a new Dataframe.
+Note that only the index from the original df is retained - any other columns in the original df are not part of the new df
+
+.. ipython:: python
+
+ df = df.nearest_neighbors.apply(pd.Series)
+ df
+
+Stack the new columns as rows; this creates a new index level we'll want to drop in the next step.
+Note that at this point we have a Series, not a Dataframe
+
+.. ipython:: python
+
+ ser = df.stack()
+ ser
+
+ #. Drop the extraneous index level created by the stack
+ ser.reset_index(level=2, drop=True, inplace=True)
+ ser
+
+ #. Create a Dataframe from the Series
+ df = ser.to_frame('nearest_neighbors')
+ df
+
| - [x] closes #17027
This is the continuation of PR #19215
| https://api.github.com/repos/pandas-dev/pandas/pulls/23041 | 2018-10-08T10:30:54Z | 2018-11-04T09:52:50Z | null | 2018-11-04T09:52:50Z |
DOC:Updated as suggested in the comments of PR#20211 | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 51c84d6e28cb4..61f3325e3dceb 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2005,15 +2005,29 @@ def __contains__(self, key):
return False
_index_shared_docs['contains'] = """
- return a boolean if this key is IN the index
+ Return a boolean if this key is in the index.
+
Parameters
----------
key : object
+ key to be searched.
Returns
-------
boolean
+ result of the search.
+
+ See Also
+ --------
+ Index.isin
+
+ Examples
+ --------
+ >>> pd.CategoricalIndex([2000, 2001, 2012]).contains(2001)
+ True
+ >>> pd.CategoricalIndex([2000, 2001, 2012]).contains(2222)
+ False
"""
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
| - [x] PR title is "DOC: update the docstring"
- [x] closes #20211
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
The following is added for the doc string :
"""
Return a boolean if this key is in the index.
Parameters
----------
key : object
key to be searched.
Returns
-------
boolean
result of the search.
See Also
--------
Index.isin
Examples
--------
>>> pd.CategoricalIndex([2000, 2001, 2012]).contains(2001)
True
>>> pd.CategoricalIndex([2000, 2001, 2012]).contains(2222)
False
"""
The suggestions mentioned in the pull request#20211 were followed. Probably this can help close pull request#20211. | https://api.github.com/repos/pandas-dev/pandas/pulls/23039 | 2018-10-08T07:56:00Z | 2018-10-08T14:37:31Z | null | 2018-10-10T12:07:08Z |
DOC: Added 'Modules Privacy Has Changed' section to overview.rst | diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index 6ba9501ba0b5e..d2c5e8f2372ae 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -117,6 +117,23 @@ Institutional Partners
The information about current institutional partners can be found on `pandas website page <https://pandas.pydata.org/about.html>`__.
+Private Modules
+----------------
+
+The following modules in pandas are considered private:
+
+ * ``pandas.core`` and all sub-modules
+ * ``pandas.compat`` and all sub-modules
+
+Public Modules
+-----------------
+
+The following modules in pandas are considered public:
+
+ * ``pandas.error``
+ * ``pandas.testing``
+ * ``pandas.plotting``
+
License
-------
| - [x] closes #22942
- [x] tests passed
| https://api.github.com/repos/pandas-dev/pandas/pulls/23038 | 2018-10-08T07:49:26Z | 2018-11-20T15:06:25Z | null | 2018-11-20T15:09:01Z |
BUG-22796 Concat multicolumn tz-aware DataFrame | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index ca4ea8e366754..a4209ba90aaee 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -892,6 +892,7 @@ Reshaping
- Bug in :func:`pandas.wide_to_long` when a string is passed to the stubnames argument and a column name is a substring of that stubname (:issue:`22468`)
- Bug in :func:`merge` when merging ``datetime64[ns, tz]`` data that contained a DST transition (:issue:`18885`)
- Bug in :func:`merge_asof` when merging on float values within defined tolerance (:issue:`22981`)
+- Bug in :func:`pandas.concat` when concatenating a multicolumn DataFrame with tz-aware data against a DataFrame with a different number of columns (:issue`22796`)
Build Changes
^^^^^^^^^^^^^
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 611cae28877c3..f07fb3cd80eab 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -546,6 +546,17 @@ def __new__(cls, unit=None, tz=None):
cls._cache[key] = u
return u
+ @classmethod
+ def construct_array_type(cls):
+ """Return the array type associated with this dtype
+
+ Returns
+ -------
+ type
+ """
+ from pandas import DatetimeIndex
+ return DatetimeIndex
+
@classmethod
def construct_from_string(cls, string):
""" attempt to construct this type from a string, raise a TypeError if
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 5a3f11525acf8..6d67070000dcd 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -186,6 +186,10 @@ def get_reindexed_values(self, empty_dtype, upcasted_na):
if getattr(self.block, 'is_datetimetz', False) or \
is_datetimetz(empty_dtype):
+ if self.block is None:
+ array = empty_dtype.construct_array_type()
+ missing_arr = array([fill_value], dtype=empty_dtype)
+ return missing_arr.repeat(self.shape[1])
pass
elif getattr(self.block, 'is_categorical', False):
pass
diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
index d1f921bc5e894..ece9559313ba0 100644
--- a/pandas/tests/frame/test_combine_concat.py
+++ b/pandas/tests/frame/test_combine_concat.py
@@ -54,6 +54,38 @@ def test_concat_multiple_tzs(self):
expected = DataFrame(dict(time=[ts2, ts3]))
assert_frame_equal(results, expected)
+ @pytest.mark.parametrize(
+ 't1',
+ [
+ '2015-01-01',
+ pytest.param(pd.NaT, marks=pytest.mark.xfail(
+ reason='GH23037 incorrect dtype when concatenating',
+ strict=True))])
+ def test_concat_tz_NaT(self, t1):
+ # GH 22796
+ # Concating tz-aware multicolumn DataFrames
+ ts1 = Timestamp(t1, tz='UTC')
+ ts2 = Timestamp('2015-01-01', tz='UTC')
+ ts3 = Timestamp('2015-01-01', tz='UTC')
+
+ df1 = DataFrame([[ts1, ts2]])
+ df2 = DataFrame([[ts3]])
+
+ result = pd.concat([df1, df2])
+ expected = DataFrame([[ts1, ts2], [ts3, pd.NaT]], index=[0, 0])
+
+ assert_frame_equal(result, expected)
+
+ def test_concat_tz_not_aligned(self):
+ # GH 22796
+ ts = pd.to_datetime([1, 2]).tz_localize("UTC")
+ a = pd.DataFrame({"A": ts})
+ b = pd.DataFrame({"A": ts, "B": ts})
+ result = pd.concat([a, b], sort=True, ignore_index=True)
+ expected = pd.DataFrame({"A": list(ts) + list(ts),
+ "B": [pd.NaT, pd.NaT] + list(ts)})
+ assert_frame_equal(result, expected)
+
def test_concat_tuple_keys(self):
# GH 14438
df1 = pd.DataFrame(np.ones((2, 2)), columns=list('AB'))
| - [X] closes #22796
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
Numpy arrays don't have the datetimetz dtype, so I just passed through the DatetimeIndex directly.
Side note: There's another small bug (I think) where np.nan or pd.NaT takes on the dtype of the column instead of the row when concatenating, but the column should instead have an object dtype.
| https://api.github.com/repos/pandas-dev/pandas/pulls/23036 | 2018-10-08T01:48:14Z | 2018-10-09T18:41:43Z | 2018-10-09T18:41:43Z | 2018-10-09T18:42:09Z |
BUG: Fixed nlargest/smallest functionality for dataframes with MultiIndex columns | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index a41b0c9521f99..a547edec2f3ce 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -1194,6 +1194,7 @@ Reshaping
- Bug in :func:`merge_asof` when merging on float values within defined tolerance (:issue:`22981`)
- Bug in :func:`pandas.concat` when concatenating a multicolumn DataFrame with tz-aware data against a DataFrame with a different number of columns (:issue`22796`)
- Bug in :func:`merge_asof` where confusing error message raised when attempting to merge with missing values (:issue:`23189`)
+- Bug in :meth:`DataFrame.nsmallest` and :meth:`DataFrame.nlargest` for dataframes that have :class:`MultiIndex`ed columns (:issue:`23033`).
.. _whatsnew_0240.bug_fixes.sparse:
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 0f1eb12883fd5..df2da26685a16 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1161,7 +1161,7 @@ class SelectNFrame(SelectN):
def __init__(self, obj, n, keep, columns):
super(SelectNFrame, self).__init__(obj, n, keep)
- if not is_list_like(columns):
+ if not is_list_like(columns) or isinstance(columns, tuple):
columns = [columns]
columns = list(columns)
self.columns = columns
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index b83fba7e7b277..ab4eaf02f38dd 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -2153,7 +2153,7 @@ def test_n(self, df_strings, nselect_method, n, order):
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('columns', [
- ('group', 'category_string'), ('group', 'string')])
+ ['group', 'category_string'], ['group', 'string']])
def test_n_error(self, df_main_dtypes, nselect_method, columns):
df = df_main_dtypes
col = columns[1]
@@ -2259,3 +2259,20 @@ def test_series_nat_conversion(self):
df.rank()
result = df
tm.assert_frame_equal(result, expected)
+
+ def test_multiindex_column_lookup(self):
+ # Check whether tuples are correctly treated as multi-level lookups.
+ # GH 23033
+ df = pd.DataFrame(
+ columns=pd.MultiIndex.from_product([['x'], ['a', 'b']]),
+ data=[[0.33, 0.13], [0.86, 0.25], [0.25, 0.70], [0.85, 0.91]])
+
+ # nsmallest
+ result = df.nsmallest(3, ('x', 'a'))
+ expected = df.iloc[[2, 0, 3]]
+ tm.assert_frame_equal(result, expected)
+
+ # nlargest
+ result = df.nlargest(3, ('x', 'b'))
+ expected = df.iloc[[3, 2, 1]]
+ tm.assert_frame_equal(result, expected)
| This change fixes nlargest/smallest functionality for dataframes with MultiIndex columns.
- [x] closes #23033
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/23034 | 2018-10-08T00:45:00Z | 2018-10-24T12:33:14Z | 2018-10-24T12:33:13Z | 2018-10-24T22:09:00Z |
Add cookbook entry for triangular correlation matrix (closes #22840) | diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index be8457fc14a4f..21d1f11ba49ba 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -1226,6 +1226,17 @@ Computation
Correlation
***********
+Often it's useful to obtain the lower (or upper) triangular form of a correlation matrix calculated from :func:`DataFrame.corr`. This can be achieved by passing a boolean mask to ``where`` as follows:
+
+.. ipython:: python
+
+ df = pd.DataFrame(np.random.random(size=(100, 5)))
+
+ corr_mat = df.corr()
+ mask = np.tril(np.ones_like(corr_mat, dtype=np.bool), k=-1)
+
+ corr_mat.where(mask)
+
The `method` argument within `DataFrame.corr` can accept a callable in addition to the named correlation types. Here we compute the `distance correlation <https://en.wikipedia.org/wiki/Distance_correlation>`__ matrix for a `DataFrame` object.
.. code-block:: python
| - [ x ] closes #22840
- [ x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` | https://api.github.com/repos/pandas-dev/pandas/pulls/23032 | 2018-10-08T00:38:08Z | 2018-11-03T14:36:24Z | 2018-11-03T14:36:24Z | 2018-11-03T14:40:37Z |
BUG: Fix PeriodIndex +/- TimedeltaIndex | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index a4209ba90aaee..0eeaff2fe62a1 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -729,6 +729,7 @@ Datetimelike
- Bug in :class:`DatetimeIndex` where frequency was being set if original frequency was ``None`` (:issue:`22150`)
- Bug in rounding methods of :class:`DatetimeIndex` (:meth:`~DatetimeIndex.round`, :meth:`~DatetimeIndex.ceil`, :meth:`~DatetimeIndex.floor`) and :class:`Timestamp` (:meth:`~Timestamp.round`, :meth:`~Timestamp.ceil`, :meth:`~Timestamp.floor`) could give rise to loss of precision (:issue:`22591`)
- Bug in :func:`to_datetime` with an :class:`Index` argument that would drop the ``name`` from the result (:issue:`21697`)
+- Bug in :class:`PeriodIndex` where adding or subtracting a :class:`timedelta` or :class:`Tick` object produced incorrect results (:issue:`22988`)
Timedelta
^^^^^^^^^
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index e4ace2bfe1509..a8c3b372e278f 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -382,6 +382,11 @@ def _add_delta_tdi(self, other):
if not len(self) == len(other):
raise ValueError("cannot add indices of unequal length")
+ if isinstance(other, np.ndarray):
+ # ndarray[timedelta64]; wrap in TimedeltaIndex for op
+ from pandas import TimedeltaIndex
+ other = TimedeltaIndex(other)
+
self_i8 = self.asi8
other_i8 = other.asi8
new_values = checked_add_with_arr(self_i8, other_i8,
@@ -632,11 +637,17 @@ def __add__(self, other):
return self._add_datelike(other)
elif is_integer_dtype(other):
result = self._addsub_int_array(other, operator.add)
- elif is_float_dtype(other) or is_period_dtype(other):
+ elif is_float_dtype(other):
# Explicitly catch invalid dtypes
raise TypeError("cannot add {dtype}-dtype to {cls}"
.format(dtype=other.dtype,
cls=type(self).__name__))
+ elif is_period_dtype(other):
+ # if self is a TimedeltaArray and other is a PeriodArray with
+ # a timedelta-like (i.e. Tick) freq, this operation is valid.
+ # Defer to the PeriodArray implementation.
+ # In remaining cases, this will end up raising TypeError.
+ return NotImplemented
elif is_extension_array_dtype(other):
# Categorical op will raise; defer explicitly
return NotImplemented
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 7daaa8de1734f..bfddce662123f 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -506,7 +506,7 @@ def _add_delta(self, delta):
Parameters
----------
delta : {timedelta, np.timedelta64, DateOffset,
- TimedelaIndex, ndarray[timedelta64]}
+ TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 6d13fb9ecaa39..7aaf3ddbb9c67 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
from datetime import timedelta
+import operator
import warnings
import numpy as np
@@ -17,8 +18,8 @@
from pandas.util._decorators import (cache_readonly, deprecate_kwarg)
from pandas.core.dtypes.common import (
- is_integer_dtype, is_float_dtype, is_period_dtype,
- is_datetime64_dtype)
+ is_integer_dtype, is_float_dtype, is_period_dtype, is_timedelta64_dtype,
+ is_datetime64_dtype, _TD_DTYPE)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.generic import ABCSeries
@@ -355,24 +356,54 @@ def _add_offset(self, other):
return self._time_shift(other.n)
def _add_delta_td(self, other):
+ assert isinstance(self.freq, Tick) # checked by calling function
assert isinstance(other, (timedelta, np.timedelta64, Tick))
- nanos = delta_to_nanoseconds(other)
- own_offset = frequencies.to_offset(self.freq.rule_code)
- if isinstance(own_offset, Tick):
- offset_nanos = delta_to_nanoseconds(own_offset)
- if np.all(nanos % offset_nanos == 0):
- return self._time_shift(nanos // offset_nanos)
+ delta = self._check_timedeltalike_freq_compat(other)
- # raise when input doesn't have freq
- raise IncompatibleFrequency("Input has different freq from "
- "{cls}(freq={freqstr})"
- .format(cls=type(self).__name__,
- freqstr=self.freqstr))
+ # Note: when calling parent class's _add_delta_td, it will call
+ # delta_to_nanoseconds(delta). Because delta here is an integer,
+ # delta_to_nanoseconds will return it unchanged.
+ return DatetimeLikeArrayMixin._add_delta_td(self, delta)
+
+ def _add_delta_tdi(self, other):
+ assert isinstance(self.freq, Tick) # checked by calling function
+
+ delta = self._check_timedeltalike_freq_compat(other)
+ return self._addsub_int_array(delta, operator.add)
def _add_delta(self, other):
- ordinal_delta = self._maybe_convert_timedelta(other)
- return self._time_shift(ordinal_delta)
+ """
+ Add a timedelta-like, Tick, or TimedeltaIndex-like object
+ to self.
+
+ Parameters
+ ----------
+ other : {timedelta, np.timedelta64, Tick,
+ TimedeltaIndex, ndarray[timedelta64]}
+
+ Returns
+ -------
+ result : same type as self
+ """
+ if not isinstance(self.freq, Tick):
+ # We cannot add timedelta-like to non-tick PeriodArray
+ raise IncompatibleFrequency("Input has different freq from "
+ "{cls}(freq={freqstr})"
+ .format(cls=type(self).__name__,
+ freqstr=self.freqstr))
+
+ # TODO: standardize across datetimelike subclasses whether to return
+ # i8 view or _shallow_copy
+ if isinstance(other, (Tick, timedelta, np.timedelta64)):
+ new_values = self._add_delta_td(other)
+ return self._shallow_copy(new_values)
+ elif is_timedelta64_dtype(other):
+ # ndarray[timedelta64] or TimedeltaArray/index
+ new_values = self._add_delta_tdi(other)
+ return self._shallow_copy(new_values)
+ else: # pragma: no cover
+ raise TypeError(type(other).__name__)
@deprecate_kwarg(old_arg_name='n', new_arg_name='periods')
def shift(self, periods):
@@ -428,14 +459,9 @@ def _maybe_convert_timedelta(self, other):
other, (timedelta, np.timedelta64, Tick, np.ndarray)):
offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(offset, Tick):
- if isinstance(other, np.ndarray):
- nanos = np.vectorize(delta_to_nanoseconds)(other)
- else:
- nanos = delta_to_nanoseconds(other)
- offset_nanos = delta_to_nanoseconds(offset)
- check = np.all(nanos % offset_nanos == 0)
- if check:
- return nanos // offset_nanos
+ # _check_timedeltalike_freq_compat will raise if incompatible
+ delta = self._check_timedeltalike_freq_compat(other)
+ return delta
elif isinstance(other, DateOffset):
freqstr = other.rule_code
base = frequencies.get_base_alias(freqstr)
@@ -454,6 +480,58 @@ def _maybe_convert_timedelta(self, other):
raise IncompatibleFrequency(msg.format(cls=type(self).__name__,
freqstr=self.freqstr))
+ def _check_timedeltalike_freq_compat(self, other):
+ """
+ Arithmetic operations with timedelta-like scalars or array `other`
+ are only valid if `other` is an integer multiple of `self.freq`.
+ If the operation is valid, find that integer multiple. Otherwise,
+ raise because the operation is invalid.
+
+ Parameters
+ ----------
+ other : timedelta, np.timedelta64, Tick,
+ ndarray[timedelta64], TimedeltaArray, TimedeltaIndex
+
+ Returns
+ -------
+ multiple : int or ndarray[int64]
+
+ Raises
+ ------
+ IncompatibleFrequency
+ """
+ assert isinstance(self.freq, Tick) # checked by calling function
+ own_offset = frequencies.to_offset(self.freq.rule_code)
+ base_nanos = delta_to_nanoseconds(own_offset)
+
+ if isinstance(other, (timedelta, np.timedelta64, Tick)):
+ nanos = delta_to_nanoseconds(other)
+
+ elif isinstance(other, np.ndarray):
+ # numpy timedelta64 array; all entries must be compatible
+ assert other.dtype.kind == 'm'
+ if other.dtype != _TD_DTYPE:
+ # i.e. non-nano unit
+ # TODO: disallow unit-less timedelta64
+ other = other.astype(_TD_DTYPE)
+ nanos = other.view('i8')
+ else:
+ # TimedeltaArray/Index
+ nanos = other.asi8
+
+ if np.all(nanos % base_nanos == 0):
+ # nanos being added is an integer multiple of the
+ # base-frequency to self.freq
+ delta = nanos // base_nanos
+ # delta is the integer (or integer-array) number of periods
+ # by which will be added to self.
+ return delta
+
+ raise IncompatibleFrequency("Input has different freq from "
+ "{cls}(freq={freqstr})"
+ .format(cls=type(self).__name__,
+ freqstr=self.freqstr))
+
PeriodArrayMixin._add_comparison_ops()
PeriodArrayMixin._add_datetimelike_methods()
diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py
index 3210290b9c5c8..d81ab2b3a2ec3 100644
--- a/pandas/tests/arithmetic/test_period.py
+++ b/pandas/tests/arithmetic/test_period.py
@@ -446,26 +446,36 @@ def test_pi_add_sub_td64_array_non_tick_raises(self):
with pytest.raises(period.IncompatibleFrequency):
tdarr - rng
- @pytest.mark.xfail(reason='op with TimedeltaIndex raises, with ndarray OK',
- strict=True)
def test_pi_add_sub_td64_array_tick(self):
- rng = pd.period_range('1/1/2000', freq='Q', periods=3)
+ # PeriodIndex + Timedelta-like is allowed only with
+ # tick-like frequencies
+ rng = pd.period_range('1/1/2000', freq='90D', periods=3)
tdi = pd.TimedeltaIndex(['-1 Day', '-1 Day', '-1 Day'])
tdarr = tdi.values
- expected = rng + tdi
+ expected = pd.period_range('12/31/1999', freq='90D', periods=3)
+ result = rng + tdi
+ tm.assert_index_equal(result, expected)
result = rng + tdarr
tm.assert_index_equal(result, expected)
+ result = tdi + rng
+ tm.assert_index_equal(result, expected)
result = tdarr + rng
tm.assert_index_equal(result, expected)
- expected = rng - tdi
+ expected = pd.period_range('1/2/2000', freq='90D', periods=3)
+
+ result = rng - tdi
+ tm.assert_index_equal(result, expected)
result = rng - tdarr
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
tdarr - rng
+ with pytest.raises(TypeError):
+ tdi - rng
+
# -----------------------------------------------------------------
# operations with array/Index of DateOffset objects
@@ -596,6 +606,56 @@ def test_pi_sub_intarray(self, box):
# Timedelta-like (timedelta, timedelta64, Timedelta, Tick)
# TODO: Some of these are misnomers because of non-Tick DateOffsets
+ def test_pi_add_timedeltalike_minute_gt1(self, three_days):
+ # GH#23031 adding a time-delta-like offset to a PeriodArray that has
+ # minute frequency with n != 1. A more general case is tested below
+ # in test_pi_add_timedeltalike_tick_gt1, but here we write out the
+ # expected result more explicitly.
+ other = three_days
+ rng = pd.period_range('2014-05-01', periods=3, freq='2D')
+
+ expected = pd.PeriodIndex(['2014-05-04', '2014-05-06', '2014-05-08'],
+ freq='2D')
+
+ result = rng + other
+ tm.assert_index_equal(result, expected)
+
+ result = other + rng
+ tm.assert_index_equal(result, expected)
+
+ # subtraction
+ expected = pd.PeriodIndex(['2014-04-28', '2014-04-30', '2014-05-02'],
+ freq='2D')
+ result = rng - other
+ tm.assert_index_equal(result, expected)
+
+ with pytest.raises(TypeError):
+ other - rng
+
+ @pytest.mark.parametrize('freqstr', ['5ns', '5us', '5ms',
+ '5s', '5T', '5h', '5d'])
+ def test_pi_add_timedeltalike_tick_gt1(self, three_days, freqstr):
+ # GH#23031 adding a time-delta-like offset to a PeriodArray that has
+ # tick-like frequency with n != 1
+ other = three_days
+ rng = pd.period_range('2014-05-01', periods=6, freq=freqstr)
+
+ expected = pd.period_range(rng[0] + other, periods=6, freq=freqstr)
+
+ result = rng + other
+ tm.assert_index_equal(result, expected)
+
+ result = other + rng
+ tm.assert_index_equal(result, expected)
+
+ # subtraction
+ expected = pd.period_range(rng[0] - other, periods=6, freq=freqstr)
+ result = rng - other
+ tm.assert_index_equal(result, expected)
+
+ with pytest.raises(TypeError):
+ other - rng
+
def test_pi_add_iadd_timedeltalike_daily(self, three_days):
# Tick
other = three_days
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 4e01e0feb004c..61a3c4bb6934e 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -805,6 +805,7 @@ def assert_index_equal(left, right, exact='equiv', check_names=True,
Specify object name being compared, internally used to show appropriate
assertion message
"""
+ __tracebackhide__ = True
def _check_types(l, r, obj='Index'):
if exact:
@@ -1048,6 +1049,8 @@ def assert_interval_array_equal(left, right, exact='equiv',
def raise_assert_detail(obj, message, left, right, diff=None):
+ __tracebackhide__ = True
+
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
| - [x] closes #22998
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/23031 | 2018-10-07T22:28:11Z | 2018-10-15T00:49:09Z | 2018-10-15T00:49:09Z | 2018-10-15T00:49:23Z |
TST: collect logical ops tests, use fixtures | diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index a09efe6d4761c..a8e61b3fd9d3a 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -388,19 +388,11 @@ def test_td64arr_sub_period(self, box, freq):
with pytest.raises(TypeError):
p - idx
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="broadcasts along "
- "wrong axis",
- raises=ValueError,
- strict=True))
- ], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
- def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
+ def test_td64arr_sub_pi(self, box_df_broadcast_failure, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
+ box = box_df_broadcast_failure
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
@@ -529,16 +521,9 @@ def test_td64arr_rsub_int_series_invalid(self, box, tdser):
with pytest.raises(err):
Series([2, 3, 4]) - tdser
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="Attempts to broadcast "
- "incorrectly",
- strict=True, raises=ValueError))
- ], ids=lambda x: x.__name__)
- def test_td64arr_add_intlike(self, box):
+ def test_td64arr_add_intlike(self, box_df_broadcast_failure):
# GH#19123
+ box = box_df_broadcast_failure
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
@@ -706,21 +691,13 @@ def test_td64arr_sub_td64_array(self, box_df_broadcast_failure):
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="Tries to broadcast "
- "incorrectly leading "
- "to alignment error",
- strict=True, raises=ValueError))
- ], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
- def test_td64arr_add_sub_tdi(self, box, names):
+ def test_td64arr_add_sub_tdi(self, box_df_broadcast_failure, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
+ box = box_df_broadcast_failure
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
@@ -830,19 +807,12 @@ def test_timedelta64_operations_with_DateOffset(self):
td - op(5)
op(5) - td
- @pytest.mark.parametrize('box', [
- pd.Index,
- Series,
- pytest.param(pd.DataFrame,
- marks=pytest.mark.xfail(reason="Tries to broadcast "
- "incorrectly",
- strict=True, raises=ValueError))
- ], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
- def test_td64arr_add_offset_index(self, names, box):
+ def test_td64arr_add_offset_index(self, names, box_df_broadcast_failure):
# GH#18849, GH#19744
+ box = box_df_broadcast_failure
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index 6ed289614b96a..433b0f09e13bc 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -27,38 +27,122 @@
from pandas.tests.frame.common import TestData, _check_mixed_float
-class TestDataFrameOperators(TestData):
+class TestDataFrameUnaryOperators(object):
+ # __pos__, __neg__, __inv__
+
+ @pytest.mark.parametrize('df,expected', [
+ (pd.DataFrame({'a': [-1, 1]}), pd.DataFrame({'a': [1, -1]})),
+ (pd.DataFrame({'a': [False, True]}),
+ pd.DataFrame({'a': [True, False]})),
+ (pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}),
+ pd.DataFrame({'a': pd.Series(pd.to_timedelta([1, -1]))}))
+ ])
+ def test_neg_numeric(self, df, expected):
+ assert_frame_equal(-df, expected)
+ assert_series_equal(-df['a'], expected['a'])
+
+ @pytest.mark.parametrize('df, expected', [
+ (np.array([1, 2], dtype=object), np.array([-1, -2], dtype=object)),
+ ([Decimal('1.0'), Decimal('2.0')], [Decimal('-1.0'), Decimal('-2.0')]),
+ ])
+ def test_neg_object(self, df, expected):
+ # GH#21380
+ df = pd.DataFrame({'a': df})
+ expected = pd.DataFrame({'a': expected})
+ assert_frame_equal(-df, expected)
+ assert_series_equal(-df['a'], expected['a'])
+
+ @pytest.mark.parametrize('df', [
+ pd.DataFrame({'a': ['a', 'b']}),
+ pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}),
+ ])
+ def test_neg_raises(self, df):
+ with pytest.raises(TypeError):
+ (- df)
+ with pytest.raises(TypeError):
+ (- df['a'])
+
+ def test_invert(self):
+ _seriesd = tm.getSeriesData()
+ df = pd.DataFrame(_seriesd)
+
+ assert_frame_equal(-(df < 0), ~(df < 0))
+
+ @pytest.mark.parametrize('df', [
+ pd.DataFrame({'a': [-1, 1]}),
+ pd.DataFrame({'a': [False, True]}),
+ pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}),
+ ])
+ def test_pos_numeric(self, df):
+ # GH#16073
+ assert_frame_equal(+df, df)
+ assert_series_equal(+df['a'], df['a'])
+
+ @pytest.mark.parametrize('df', [
+ # numpy changing behavior in the future
+ pytest.param(pd.DataFrame({'a': ['a', 'b']}),
+ marks=[pytest.mark.filterwarnings("ignore")]),
+ pd.DataFrame({'a': np.array([-1, 2], dtype=object)}),
+ pd.DataFrame({'a': [Decimal('-1.0'), Decimal('2.0')]}),
+ ])
+ def test_pos_object(self, df):
+ # GH#21380
+ assert_frame_equal(+df, df)
+ assert_series_equal(+df['a'], df['a'])
+
+ @pytest.mark.parametrize('df', [
+ pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}),
+ ])
+ def test_pos_raises(self, df):
+ with pytest.raises(TypeError):
+ (+ df)
+ with pytest.raises(TypeError):
+ (+ df['a'])
- def test_operators_boolean(self):
- # GH 5808
+class TestDataFrameLogicalOperators(object):
+ # &, |, ^
+
+ def test_logical_ops_empty_frame(self):
+ # GH#5808
# empty frames, non-mixed dtype
+ df = DataFrame(index=[1])
+
+ result = df & df
+ assert_frame_equal(result, df)
+
+ result = df | df
+ assert_frame_equal(result, df)
+
+ df2 = DataFrame(index=[1, 2])
+ result = df & df2
+ assert_frame_equal(result, df2)
- result = DataFrame(index=[1]) & DataFrame(index=[1])
- assert_frame_equal(result, DataFrame(index=[1]))
+ dfa = DataFrame(index=[1], columns=['A'])
- result = DataFrame(index=[1]) | DataFrame(index=[1])
- assert_frame_equal(result, DataFrame(index=[1]))
+ result = dfa & dfa
+ assert_frame_equal(result, dfa)
- result = DataFrame(index=[1]) & DataFrame(index=[1, 2])
- assert_frame_equal(result, DataFrame(index=[1, 2]))
+ def test_logical_ops_bool_frame(self):
+ # GH#5808
+ df1a_bool = DataFrame(True, index=[1], columns=['A'])
- result = DataFrame(index=[1], columns=['A']) & DataFrame(
- index=[1], columns=['A'])
- assert_frame_equal(result, DataFrame(index=[1], columns=['A']))
+ result = df1a_bool & df1a_bool
+ assert_frame_equal(result, df1a_bool)
- result = DataFrame(True, index=[1], columns=['A']) & DataFrame(
- True, index=[1], columns=['A'])
- assert_frame_equal(result, DataFrame(True, index=[1], columns=['A']))
+ result = df1a_bool | df1a_bool
+ assert_frame_equal(result, df1a_bool)
- result = DataFrame(True, index=[1], columns=['A']) | DataFrame(
- True, index=[1], columns=['A'])
- assert_frame_equal(result, DataFrame(True, index=[1], columns=['A']))
+ def test_logical_ops_int_frame(self):
+ # GH#5808
+ df1a_int = DataFrame(1, index=[1], columns=['A'])
+ df1a_bool = DataFrame(True, index=[1], columns=['A'])
- # boolean ops
- result = DataFrame(1, index=[1], columns=['A']) | DataFrame(
- True, index=[1], columns=['A'])
- assert_frame_equal(result, DataFrame(1, index=[1], columns=['A']))
+ result = df1a_int | df1a_bool
+ assert_frame_equal(result, df1a_int)
+
+ def test_logical_ops_invalid(self):
+ # GH#5808
df1 = DataFrame(1.0, index=[1], columns=['A'])
df2 = DataFrame(True, index=[1], columns=['A'])
@@ -70,6 +154,70 @@ def test_operators_boolean(self):
with pytest.raises(TypeError):
df1 | df2
+ def test_logical_operators(self):
+
+ def _check_bin_op(op):
+ result = op(df1, df2)
+ expected = DataFrame(op(df1.values, df2.values), index=df1.index,
+ columns=df1.columns)
+ assert result.values.dtype == np.bool_
+ assert_frame_equal(result, expected)
+
+ def _check_unary_op(op):
+ result = op(df1)
+ expected = DataFrame(op(df1.values), index=df1.index,
+ columns=df1.columns)
+ assert result.values.dtype == np.bool_
+ assert_frame_equal(result, expected)
+
+ df1 = {'a': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
+ 'b': {'a': False, 'b': True, 'c': False,
+ 'd': False, 'e': False},
+ 'c': {'a': False, 'b': False, 'c': True,
+ 'd': False, 'e': False},
+ 'd': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
+ 'e': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}}
+
+ df2 = {'a': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
+ 'b': {'a': False, 'b': True, 'c': False,
+ 'd': False, 'e': False},
+ 'c': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
+ 'd': {'a': False, 'b': False, 'c': False,
+ 'd': True, 'e': False},
+ 'e': {'a': False, 'b': False, 'c': False,
+ 'd': False, 'e': True}}
+
+ df1 = DataFrame(df1)
+ df2 = DataFrame(df2)
+
+ _check_bin_op(operator.and_)
+ _check_bin_op(operator.or_)
+ _check_bin_op(operator.xor)
+
+ # operator.neg is deprecated in numpy >= 1.9
+ _check_unary_op(operator.inv) # TODO: belongs elsewhere
+
+ def test_logical_with_nas(self):
+ d = DataFrame({'a': [np.nan, False], 'b': [True, True]})
+
+ # GH4947
+ # bool comparisons should return bool
+ result = d['a'] | d['b']
+ expected = Series([False, True])
+ assert_series_equal(result, expected)
+
+ # GH4604, automatic casting here
+ result = d['a'].fillna(False) | d['b']
+ expected = Series([True, True])
+ assert_series_equal(result, expected)
+
+ result = d['a'].fillna(False, downcast=False) | d['b']
+ expected = Series([True, True])
+ assert_series_equal(result, expected)
+
+
+class TestDataFrameOperators(TestData):
+
@pytest.mark.parametrize('op', [operator.add, operator.sub,
operator.mul, operator.truediv])
def test_operators_none_as_na(self, op):
@@ -164,142 +312,15 @@ def test_timestamp_compare(self):
result = right_f(Timestamp('nat'), df)
assert_frame_equal(result, expected)
- def test_logical_operators(self):
-
- def _check_bin_op(op):
- result = op(df1, df2)
- expected = DataFrame(op(df1.values, df2.values), index=df1.index,
- columns=df1.columns)
- assert result.values.dtype == np.bool_
- assert_frame_equal(result, expected)
-
- def _check_unary_op(op):
- result = op(df1)
- expected = DataFrame(op(df1.values), index=df1.index,
- columns=df1.columns)
- assert result.values.dtype == np.bool_
- assert_frame_equal(result, expected)
-
- df1 = {'a': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
- 'b': {'a': False, 'b': True, 'c': False,
- 'd': False, 'e': False},
- 'c': {'a': False, 'b': False, 'c': True,
- 'd': False, 'e': False},
- 'd': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
- 'e': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}}
-
- df2 = {'a': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
- 'b': {'a': False, 'b': True, 'c': False,
- 'd': False, 'e': False},
- 'c': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
- 'd': {'a': False, 'b': False, 'c': False,
- 'd': True, 'e': False},
- 'e': {'a': False, 'b': False, 'c': False,
- 'd': False, 'e': True}}
-
- df1 = DataFrame(df1)
- df2 = DataFrame(df2)
-
- _check_bin_op(operator.and_)
- _check_bin_op(operator.or_)
- _check_bin_op(operator.xor)
-
- # operator.neg is deprecated in numpy >= 1.9
- _check_unary_op(operator.inv)
-
@pytest.mark.parametrize('op,res', [('__eq__', False),
('__ne__', True)])
- # not sure what's correct here.
+ # TODO: not sure what's correct here.
@pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")
def test_logical_typeerror_with_non_valid(self, op, res):
# we are comparing floats vs a string
result = getattr(self.frame, op)('foo')
assert bool(result.all().all()) is res
- def test_logical_with_nas(self):
- d = DataFrame({'a': [np.nan, False], 'b': [True, True]})
-
- # GH4947
- # bool comparisons should return bool
- result = d['a'] | d['b']
- expected = Series([False, True])
- assert_series_equal(result, expected)
-
- # GH4604, automatic casting here
- result = d['a'].fillna(False) | d['b']
- expected = Series([True, True])
- assert_series_equal(result, expected)
-
- result = d['a'].fillna(False, downcast=False) | d['b']
- expected = Series([True, True])
- assert_series_equal(result, expected)
-
- @pytest.mark.parametrize('df,expected', [
- (pd.DataFrame({'a': [-1, 1]}), pd.DataFrame({'a': [1, -1]})),
- (pd.DataFrame({'a': [False, True]}),
- pd.DataFrame({'a': [True, False]})),
- (pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}),
- pd.DataFrame({'a': pd.Series(pd.to_timedelta([1, -1]))}))
- ])
- def test_neg_numeric(self, df, expected):
- assert_frame_equal(-df, expected)
- assert_series_equal(-df['a'], expected['a'])
-
- @pytest.mark.parametrize('df, expected', [
- (np.array([1, 2], dtype=object), np.array([-1, -2], dtype=object)),
- ([Decimal('1.0'), Decimal('2.0')], [Decimal('-1.0'), Decimal('-2.0')]),
- ])
- def test_neg_object(self, df, expected):
- # GH 21380
- df = pd.DataFrame({'a': df})
- expected = pd.DataFrame({'a': expected})
- assert_frame_equal(-df, expected)
- assert_series_equal(-df['a'], expected['a'])
-
- @pytest.mark.parametrize('df', [
- pd.DataFrame({'a': ['a', 'b']}),
- pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}),
- ])
- def test_neg_raises(self, df):
- with pytest.raises(TypeError):
- (- df)
- with pytest.raises(TypeError):
- (- df['a'])
-
- def test_invert(self):
- assert_frame_equal(-(self.frame < 0), ~(self.frame < 0))
-
- @pytest.mark.parametrize('df', [
- pd.DataFrame({'a': [-1, 1]}),
- pd.DataFrame({'a': [False, True]}),
- pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}),
- ])
- def test_pos_numeric(self, df):
- # GH 16073
- assert_frame_equal(+df, df)
- assert_series_equal(+df['a'], df['a'])
-
- @pytest.mark.parametrize('df', [
- # numpy changing behavior in the future
- pytest.param(pd.DataFrame({'a': ['a', 'b']}),
- marks=[pytest.mark.filterwarnings("ignore")]),
- pd.DataFrame({'a': np.array([-1, 2], dtype=object)}),
- pd.DataFrame({'a': [Decimal('-1.0'), Decimal('2.0')]}),
- ])
- def test_pos_object(self, df):
- # GH 21380
- assert_frame_equal(+df, df)
- assert_series_equal(+df['a'], df['a'])
-
- @pytest.mark.parametrize('df', [
- pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}),
- ])
- def test_pos_raises(self, df):
- with pytest.raises(TypeError):
- (+ df)
- with pytest.raises(TypeError):
- (+ df['a'])
-
def test_binary_ops_align(self):
# test aligning binary ops
diff --git a/pandas/tests/frame/test_period.py b/pandas/tests/frame/test_period.py
index d56df2371b2e3..d52b848bebad1 100644
--- a/pandas/tests/frame/test_period.py
+++ b/pandas/tests/frame/test_period.py
@@ -14,9 +14,6 @@ def _permute(obj):
class TestPeriodIndex(object):
- def setup_method(self, method):
- pass
-
def test_as_frame_columns(self):
rng = period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py
index b1d9d362d1402..40089c8e9e477 100644
--- a/pandas/tests/frame/test_timeseries.py
+++ b/pandas/tests/frame/test_timeseries.py
@@ -182,7 +182,7 @@ def test_frame_ctor_datetime64_column(self):
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
assert np.issubdtype(df['B'].dtype, np.dtype('M8[ns]'))
- def test_frame_add_datetime64_column(self):
+ def test_frame_append_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
df = DataFrame(index=np.arange(len(rng)))
@@ -195,7 +195,7 @@ def test_frame_datetime64_pre1900_repr(self):
# it works!
repr(df)
- def test_frame_add_datetime64_col_other_units(self):
+ def test_frame_append_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index f3ab197771d53..55e3dfde3ceb7 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -25,6 +25,361 @@
from .common import TestData
+class TestSeriesLogicalOps(object):
+ @pytest.mark.parametrize('bool_op', [operator.and_,
+ operator.or_, operator.xor])
+ def test_bool_operators_with_nas(self, bool_op):
+ # boolean &, |, ^ should work with object arrays and propagate NAs
+ ser = Series(bdate_range('1/1/2000', periods=10), dtype=object)
+ ser[::2] = np.nan
+
+ mask = ser.isna()
+ filled = ser.fillna(ser[0])
+
+ result = bool_op(ser < ser[9], ser > ser[3])
+
+ expected = bool_op(filled < filled[9], filled > filled[3])
+ expected[mask] = False
+ assert_series_equal(result, expected)
+
+ def test_operators_bitwise(self):
+ # GH#9016: support bitwise op for integer types
+ index = list('bca')
+
+ s_tft = Series([True, False, True], index=index)
+ s_fff = Series([False, False, False], index=index)
+ s_tff = Series([True, False, False], index=index)
+ s_empty = Series([])
+
+ # TODO: unused
+ # s_0101 = Series([0, 1, 0, 1])
+
+ s_0123 = Series(range(4), dtype='int64')
+ s_3333 = Series([3] * 4)
+ s_4444 = Series([4] * 4)
+
+ res = s_tft & s_empty
+ expected = s_fff
+ assert_series_equal(res, expected)
+
+ res = s_tft | s_empty
+ expected = s_tft
+ assert_series_equal(res, expected)
+
+ res = s_0123 & s_3333
+ expected = Series(range(4), dtype='int64')
+ assert_series_equal(res, expected)
+
+ res = s_0123 | s_4444
+ expected = Series(range(4, 8), dtype='int64')
+ assert_series_equal(res, expected)
+
+ s_a0b1c0 = Series([1], list('b'))
+
+ res = s_tft & s_a0b1c0
+ expected = s_tff.reindex(list('abc'))
+ assert_series_equal(res, expected)
+
+ res = s_tft | s_a0b1c0
+ expected = s_tft.reindex(list('abc'))
+ assert_series_equal(res, expected)
+
+ n0 = 0
+ res = s_tft & n0
+ expected = s_fff
+ assert_series_equal(res, expected)
+
+ res = s_0123 & n0
+ expected = Series([0] * 4)
+ assert_series_equal(res, expected)
+
+ n1 = 1
+ res = s_tft & n1
+ expected = s_tft
+ assert_series_equal(res, expected)
+
+ res = s_0123 & n1
+ expected = Series([0, 1, 0, 1])
+ assert_series_equal(res, expected)
+
+ s_1111 = Series([1] * 4, dtype='int8')
+ res = s_0123 & s_1111
+ expected = Series([0, 1, 0, 1], dtype='int64')
+ assert_series_equal(res, expected)
+
+ res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
+ expected = Series([1, 1, 3, 3], dtype='int32')
+ assert_series_equal(res, expected)
+
+ with pytest.raises(TypeError):
+ s_1111 & 'a'
+ with pytest.raises(TypeError):
+ s_1111 & ['a', 'b', 'c', 'd']
+ with pytest.raises(TypeError):
+ s_0123 & np.NaN
+ with pytest.raises(TypeError):
+ s_0123 & 3.14
+ with pytest.raises(TypeError):
+ s_0123 & [0.1, 4, 3.14, 2]
+
+ # s_0123 will be all false now because of reindexing like s_tft
+ if compat.PY3:
+ # unable to sort incompatible object via .union.
+ exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
+ with tm.assert_produces_warning(RuntimeWarning):
+ assert_series_equal(s_tft & s_0123, exp)
+ else:
+ exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
+ assert_series_equal(s_tft & s_0123, exp)
+
+ # s_tft will be all false now because of reindexing like s_0123
+ if compat.PY3:
+ # unable to sort incompatible object via .union.
+ exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
+ with tm.assert_produces_warning(RuntimeWarning):
+ assert_series_equal(s_0123 & s_tft, exp)
+ else:
+ exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
+ assert_series_equal(s_0123 & s_tft, exp)
+
+ assert_series_equal(s_0123 & False, Series([False] * 4))
+ assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
+ assert_series_equal(s_0123 & [False], Series([False] * 4))
+ assert_series_equal(s_0123 & (False), Series([False] * 4))
+ assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
+ Series([False] * 4))
+
+ s_ftft = Series([False, True, False, True])
+ assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
+
+ s_abNd = Series(['a', 'b', np.NaN, 'd'])
+ res = s_0123 & s_abNd
+ expected = s_ftft
+ assert_series_equal(res, expected)
+
+ def test_scalar_na_logical_ops_corners(self):
+ s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
+
+ with pytest.raises(TypeError):
+ s & datetime(2005, 1, 1)
+
+ s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
+ s[::2] = np.nan
+
+ expected = Series(True, index=s.index)
+ expected[::2] = False
+ result = s & list(s)
+ assert_series_equal(result, expected)
+
+ d = DataFrame({'A': s})
+ # TODO: Fix this exception - needs to be fixed! (see GH5035)
+ # (previously this was a TypeError because series returned
+ # NotImplemented
+
+ # this is an alignment issue; these are equivalent
+ # https://github.com/pandas-dev/pandas/issues/5284
+
+ with pytest.raises(TypeError):
+ d.__and__(s, axis='columns')
+
+ with pytest.raises(TypeError):
+ s & d
+
+ # this is wrong as its not a boolean result
+ # result = d.__and__(s,axis='index')
+
+ @pytest.mark.parametrize('op', [
+ operator.and_,
+ operator.or_,
+ operator.xor,
+ pytest.param(ops.rand_,
+ marks=pytest.mark.xfail(reason="GH#22092 Index "
+ "implementation returns "
+ "Index",
+ raises=AssertionError,
+ strict=True)),
+ pytest.param(ops.ror_,
+ marks=pytest.mark.xfail(reason="GH#22092 Index "
+ "implementation raises",
+ raises=ValueError, strict=True)),
+ pytest.param(ops.rxor,
+ marks=pytest.mark.xfail(reason="GH#22092 Index "
+ "implementation raises",
+ raises=TypeError, strict=True))
+ ])
+ def test_logical_ops_with_index(self, op):
+ # GH#22092, GH#19792
+ ser = Series([True, True, False, False])
+ idx1 = Index([True, False, True, False])
+ idx2 = Index([1, 0, 1, 0])
+
+ expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))])
+
+ result = op(ser, idx1)
+ assert_series_equal(result, expected)
+
+ expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))],
+ dtype=bool)
+
+ result = op(ser, idx2)
+ assert_series_equal(result, expected)
+
+ def test_logical_ops_label_based(self):
+ # GH#4947
+ # logical ops should be label based
+
+ a = Series([True, False, True], list('bca'))
+ b = Series([False, True, False], list('abc'))
+
+ expected = Series([False, True, False], list('abc'))
+ result = a & b
+ assert_series_equal(result, expected)
+
+ expected = Series([True, True, False], list('abc'))
+ result = a | b
+ assert_series_equal(result, expected)
+
+ expected = Series([True, False, False], list('abc'))
+ result = a ^ b
+ assert_series_equal(result, expected)
+
+ # rhs is bigger
+ a = Series([True, False, True], list('bca'))
+ b = Series([False, True, False, True], list('abcd'))
+
+ expected = Series([False, True, False, False], list('abcd'))
+ result = a & b
+ assert_series_equal(result, expected)
+
+ expected = Series([True, True, False, False], list('abcd'))
+ result = a | b
+ assert_series_equal(result, expected)
+
+ # filling
+
+ # vs empty
+ result = a & Series([])
+ expected = Series([False, False, False], list('bca'))
+ assert_series_equal(result, expected)
+
+ result = a | Series([])
+ expected = Series([True, False, True], list('bca'))
+ assert_series_equal(result, expected)
+
+ # vs non-matching
+ result = a & Series([1], ['z'])
+ expected = Series([False, False, False, False], list('abcz'))
+ assert_series_equal(result, expected)
+
+ result = a | Series([1], ['z'])
+ expected = Series([True, True, False, False], list('abcz'))
+ assert_series_equal(result, expected)
+
+ # identity
+ # we would like s[s|e] == s to hold for any e, whether empty or not
+ for e in [Series([]), Series([1], ['z']),
+ Series(np.nan, b.index), Series(np.nan, a.index)]:
+ result = a[a | e]
+ assert_series_equal(result, a[a])
+
+ for e in [Series(['z'])]:
+ if compat.PY3:
+ with tm.assert_produces_warning(RuntimeWarning):
+ result = a[a | e]
+ else:
+ result = a[a | e]
+ assert_series_equal(result, a[a])
+
+ # vs scalars
+ index = list('bca')
+ t = Series([True, False, True])
+
+ for v in [True, 1, 2]:
+ result = Series([True, False, True], index=index) | v
+ expected = Series([True, True, True], index=index)
+ assert_series_equal(result, expected)
+
+ for v in [np.nan, 'foo']:
+ with pytest.raises(TypeError):
+ t | v
+
+ for v in [False, 0]:
+ result = Series([True, False, True], index=index) | v
+ expected = Series([True, False, True], index=index)
+ assert_series_equal(result, expected)
+
+ for v in [True, 1]:
+ result = Series([True, False, True], index=index) & v
+ expected = Series([True, False, True], index=index)
+ assert_series_equal(result, expected)
+
+ for v in [False, 0]:
+ result = Series([True, False, True], index=index) & v
+ expected = Series([False, False, False], index=index)
+ assert_series_equal(result, expected)
+ for v in [np.nan]:
+ with pytest.raises(TypeError):
+ t & v
+
+ def test_logical_ops_df_compat(self):
+ # GH#1134
+ s1 = pd.Series([True, False, True], index=list('ABC'), name='x')
+ s2 = pd.Series([True, True, False], index=list('ABD'), name='x')
+
+ exp = pd.Series([True, False, False, False],
+ index=list('ABCD'), name='x')
+ assert_series_equal(s1 & s2, exp)
+ assert_series_equal(s2 & s1, exp)
+
+ # True | np.nan => True
+ exp = pd.Series([True, True, True, False],
+ index=list('ABCD'), name='x')
+ assert_series_equal(s1 | s2, exp)
+ # np.nan | True => np.nan, filled with False
+ exp = pd.Series([True, True, False, False],
+ index=list('ABCD'), name='x')
+ assert_series_equal(s2 | s1, exp)
+
+ # DataFrame doesn't fill nan with False
+ exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]},
+ index=list('ABCD'))
+ assert_frame_equal(s1.to_frame() & s2.to_frame(), exp)
+ assert_frame_equal(s2.to_frame() & s1.to_frame(), exp)
+
+ exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]},
+ index=list('ABCD'))
+ assert_frame_equal(s1.to_frame() | s2.to_frame(), exp)
+ assert_frame_equal(s2.to_frame() | s1.to_frame(), exp)
+
+ # different length
+ s3 = pd.Series([True, False, True], index=list('ABC'), name='x')
+ s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x')
+
+ exp = pd.Series([True, False, True, False],
+ index=list('ABCD'), name='x')
+ assert_series_equal(s3 & s4, exp)
+ assert_series_equal(s4 & s3, exp)
+
+ # np.nan | True => np.nan, filled with False
+ exp = pd.Series([True, True, True, False],
+ index=list('ABCD'), name='x')
+ assert_series_equal(s3 | s4, exp)
+ # True | np.nan => True
+ exp = pd.Series([True, True, True, True],
+ index=list('ABCD'), name='x')
+ assert_series_equal(s4 | s3, exp)
+
+ exp = pd.DataFrame({'x': [True, False, True, np.nan]},
+ index=list('ABCD'))
+ assert_frame_equal(s3.to_frame() & s4.to_frame(), exp)
+ assert_frame_equal(s4.to_frame() & s3.to_frame(), exp)
+
+ exp = pd.DataFrame({'x': [True, True, True, np.nan]},
+ index=list('ABCD'))
+ assert_frame_equal(s3.to_frame() | s4.to_frame(), exp)
+ assert_frame_equal(s4.to_frame() | s3.to_frame(), exp)
+
+
class TestSeriesComparisons(object):
def test_comparisons(self):
left = np.random.randn(10)
@@ -164,22 +519,6 @@ def test_comparison_operators_with_nas(self):
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
- @pytest.mark.parametrize('bool_op', [operator.and_,
- operator.or_, operator.xor])
- def test_bool_operators_with_nas(self, bool_op):
- # boolean &, |, ^ should work with object arrays and propagate NAs
- ser = Series(bdate_range('1/1/2000', periods=10), dtype=object)
- ser[::2] = np.nan
-
- mask = ser.isna()
- filled = ser.fillna(ser[0])
-
- result = bool_op(ser < ser[9], ser > ser[3])
-
- expected = bool_op(filled < filled[9], filled > filled[3])
- expected[mask] = False
- assert_series_equal(result, expected)
-
def test_unequal_categorical_comparison_raises_type_error(self):
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
@@ -258,104 +597,44 @@ def test_comparison_different_length(self):
with pytest.raises(ValueError):
a == b
- def test_comparison_label_based(self):
-
- # GH 4947
- # comparisons should be label based
-
- a = Series([True, False, True], list('bca'))
- b = Series([False, True, False], list('abc'))
-
- expected = Series([False, True, False], list('abc'))
- result = a & b
- assert_series_equal(result, expected)
-
- expected = Series([True, True, False], list('abc'))
- result = a | b
- assert_series_equal(result, expected)
-
- expected = Series([True, False, False], list('abc'))
- result = a ^ b
- assert_series_equal(result, expected)
-
- # rhs is bigger
- a = Series([True, False, True], list('bca'))
- b = Series([False, True, False, True], list('abcd'))
-
- expected = Series([False, True, False, False], list('abcd'))
- result = a & b
- assert_series_equal(result, expected)
-
- expected = Series([True, True, False, False], list('abcd'))
- result = a | b
- assert_series_equal(result, expected)
-
- # filling
-
- # vs empty
- result = a & Series([])
- expected = Series([False, False, False], list('bca'))
- assert_series_equal(result, expected)
-
- result = a | Series([])
- expected = Series([True, False, True], list('bca'))
- assert_series_equal(result, expected)
+ def test_ne(self):
+ ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
+ expected = [True, True, False, True, True]
+ assert tm.equalContents(ts.index != 5, expected)
+ assert tm.equalContents(~(ts.index == 5), expected)
- # vs non-matching
- result = a & Series([1], ['z'])
- expected = Series([False, False, False, False], list('abcz'))
- assert_series_equal(result, expected)
+ def test_comp_ops_df_compat(self):
+ # GH 1134
+ s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
+ s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
- result = a | Series([1], ['z'])
- expected = Series([True, True, False, False], list('abcz'))
- assert_series_equal(result, expected)
+ s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
+ s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
- # identity
- # we would like s[s|e] == s to hold for any e, whether empty or not
- for e in [Series([]), Series([1], ['z']),
- Series(np.nan, b.index), Series(np.nan, a.index)]:
- result = a[a | e]
- assert_series_equal(result, a[a])
+ for left, right in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:
- for e in [Series(['z'])]:
- if compat.PY3:
- with tm.assert_produces_warning(RuntimeWarning):
- result = a[a | e]
- else:
- result = a[a | e]
- assert_series_equal(result, a[a])
+ msg = "Can only compare identically-labeled Series objects"
+ with tm.assert_raises_regex(ValueError, msg):
+ left == right
- # vs scalars
- index = list('bca')
- t = Series([True, False, True])
+ with tm.assert_raises_regex(ValueError, msg):
+ left != right
- for v in [True, 1, 2]:
- result = Series([True, False, True], index=index) | v
- expected = Series([True, True, True], index=index)
- assert_series_equal(result, expected)
+ with tm.assert_raises_regex(ValueError, msg):
+ left < right
- for v in [np.nan, 'foo']:
- with pytest.raises(TypeError):
- t | v
+ msg = "Can only compare identically-labeled DataFrame objects"
+ with tm.assert_raises_regex(ValueError, msg):
+ left.to_frame() == right.to_frame()
- for v in [False, 0]:
- result = Series([True, False, True], index=index) | v
- expected = Series([True, False, True], index=index)
- assert_series_equal(result, expected)
+ with tm.assert_raises_regex(ValueError, msg):
+ left.to_frame() != right.to_frame()
- for v in [True, 1]:
- result = Series([True, False, True], index=index) & v
- expected = Series([True, False, True], index=index)
- assert_series_equal(result, expected)
+ with tm.assert_raises_regex(ValueError, msg):
+ left.to_frame() < right.to_frame()
- for v in [False, 0]:
- result = Series([True, False, True], index=index) & v
- expected = Series([False, False, False], index=index)
- assert_series_equal(result, expected)
- for v in [np.nan]:
- with pytest.raises(TypeError):
- t & v
+class TestSeriesFlexComparisonOps(object):
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
@@ -414,53 +693,17 @@ def test_comparison_flex_alignment_fill(self):
exp = pd.Series([True, True, False, False], index=list('abcd'))
assert_series_equal(left.ne(right, fill_value=2), exp)
- exp = pd.Series([False, False, True, True], index=list('abcd'))
- assert_series_equal(left.le(right, fill_value=0), exp)
-
- exp = pd.Series([False, False, False, True], index=list('abcd'))
- assert_series_equal(left.lt(right, fill_value=0), exp)
-
- exp = pd.Series([True, True, True, False], index=list('abcd'))
- assert_series_equal(left.ge(right, fill_value=0), exp)
-
- exp = pd.Series([True, True, False, False], index=list('abcd'))
- assert_series_equal(left.gt(right, fill_value=0), exp)
-
- def test_ne(self):
- ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
- expected = [True, True, False, True, True]
- assert tm.equalContents(ts.index != 5, expected)
- assert tm.equalContents(~(ts.index == 5), expected)
-
- def test_comp_ops_df_compat(self):
- # GH 1134
- s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
- s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
-
- s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
- s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
-
- for left, right in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:
-
- msg = "Can only compare identically-labeled Series objects"
- with tm.assert_raises_regex(ValueError, msg):
- left == right
-
- with tm.assert_raises_regex(ValueError, msg):
- left != right
-
- with tm.assert_raises_regex(ValueError, msg):
- left < right
-
- msg = "Can only compare identically-labeled DataFrame objects"
- with tm.assert_raises_regex(ValueError, msg):
- left.to_frame() == right.to_frame()
+ exp = pd.Series([False, False, True, True], index=list('abcd'))
+ assert_series_equal(left.le(right, fill_value=0), exp)
- with tm.assert_raises_regex(ValueError, msg):
- left.to_frame() != right.to_frame()
+ exp = pd.Series([False, False, False, True], index=list('abcd'))
+ assert_series_equal(left.lt(right, fill_value=0), exp)
- with tm.assert_raises_regex(ValueError, msg):
- left.to_frame() < right.to_frame()
+ exp = pd.Series([True, True, True, False], index=list('abcd'))
+ assert_series_equal(left.ge(right, fill_value=0), exp)
+
+ exp = pd.Series([True, True, False, False], index=list('abcd'))
+ assert_series_equal(left.gt(right, fill_value=0), exp)
class TestDatetimeSeriesArithmetic(object):
@@ -577,12 +820,6 @@ def test_op_method(self, opname, ts):
expected = alt(other, series)
assert_almost_equal(result, expected)
- def test_neg(self):
- assert_series_equal(-self.series, -1 * self.series)
-
- def test_invert(self):
- assert_series_equal(-(self.series < 0), ~(self.series < 0))
-
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
@@ -604,188 +841,6 @@ def test_ops_datetimelike_align(self):
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
- @pytest.mark.parametrize('op', [
- operator.and_,
- operator.or_,
- operator.xor,
- pytest.param(ops.rand_,
- marks=pytest.mark.xfail(reason="GH#22092 Index "
- "implementation returns "
- "Index",
- raises=AssertionError,
- strict=True)),
- pytest.param(ops.ror_,
- marks=pytest.mark.xfail(reason="GH#22092 Index "
- "implementation raises",
- raises=ValueError, strict=True)),
- pytest.param(ops.rxor,
- marks=pytest.mark.xfail(reason="GH#22092 Index "
- "implementation raises",
- raises=TypeError, strict=True))
- ])
- def test_bool_ops_with_index(self, op):
- # GH#22092, GH#19792
- ser = Series([True, True, False, False])
- idx1 = Index([True, False, True, False])
- idx2 = Index([1, 0, 1, 0])
-
- expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))])
-
- result = op(ser, idx1)
- assert_series_equal(result, expected)
-
- expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))],
- dtype=bool)
-
- result = op(ser, idx2)
- assert_series_equal(result, expected)
-
- def test_operators_bitwise(self):
- # GH 9016: support bitwise op for integer types
- index = list('bca')
-
- s_tft = Series([True, False, True], index=index)
- s_fff = Series([False, False, False], index=index)
- s_tff = Series([True, False, False], index=index)
- s_empty = Series([])
-
- # TODO: unused
- # s_0101 = Series([0, 1, 0, 1])
-
- s_0123 = Series(range(4), dtype='int64')
- s_3333 = Series([3] * 4)
- s_4444 = Series([4] * 4)
-
- res = s_tft & s_empty
- expected = s_fff
- assert_series_equal(res, expected)
-
- res = s_tft | s_empty
- expected = s_tft
- assert_series_equal(res, expected)
-
- res = s_0123 & s_3333
- expected = Series(range(4), dtype='int64')
- assert_series_equal(res, expected)
-
- res = s_0123 | s_4444
- expected = Series(range(4, 8), dtype='int64')
- assert_series_equal(res, expected)
-
- s_a0b1c0 = Series([1], list('b'))
-
- res = s_tft & s_a0b1c0
- expected = s_tff.reindex(list('abc'))
- assert_series_equal(res, expected)
-
- res = s_tft | s_a0b1c0
- expected = s_tft.reindex(list('abc'))
- assert_series_equal(res, expected)
-
- n0 = 0
- res = s_tft & n0
- expected = s_fff
- assert_series_equal(res, expected)
-
- res = s_0123 & n0
- expected = Series([0] * 4)
- assert_series_equal(res, expected)
-
- n1 = 1
- res = s_tft & n1
- expected = s_tft
- assert_series_equal(res, expected)
-
- res = s_0123 & n1
- expected = Series([0, 1, 0, 1])
- assert_series_equal(res, expected)
-
- s_1111 = Series([1] * 4, dtype='int8')
- res = s_0123 & s_1111
- expected = Series([0, 1, 0, 1], dtype='int64')
- assert_series_equal(res, expected)
-
- res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
- expected = Series([1, 1, 3, 3], dtype='int32')
- assert_series_equal(res, expected)
-
- with pytest.raises(TypeError):
- s_1111 & 'a'
- with pytest.raises(TypeError):
- s_1111 & ['a', 'b', 'c', 'd']
- with pytest.raises(TypeError):
- s_0123 & np.NaN
- with pytest.raises(TypeError):
- s_0123 & 3.14
- with pytest.raises(TypeError):
- s_0123 & [0.1, 4, 3.14, 2]
-
- # s_0123 will be all false now because of reindexing like s_tft
- if compat.PY3:
- # unable to sort incompatible object via .union.
- exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
- with tm.assert_produces_warning(RuntimeWarning):
- assert_series_equal(s_tft & s_0123, exp)
- else:
- exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
- assert_series_equal(s_tft & s_0123, exp)
-
- # s_tft will be all false now because of reindexing like s_0123
- if compat.PY3:
- # unable to sort incompatible object via .union.
- exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
- with tm.assert_produces_warning(RuntimeWarning):
- assert_series_equal(s_0123 & s_tft, exp)
- else:
- exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
- assert_series_equal(s_0123 & s_tft, exp)
-
- assert_series_equal(s_0123 & False, Series([False] * 4))
- assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
- assert_series_equal(s_0123 & [False], Series([False] * 4))
- assert_series_equal(s_0123 & (False), Series([False] * 4))
- assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
- Series([False] * 4))
-
- s_ftft = Series([False, True, False, True])
- assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
-
- s_abNd = Series(['a', 'b', np.NaN, 'd'])
- res = s_0123 & s_abNd
- expected = s_ftft
- assert_series_equal(res, expected)
-
- def test_scalar_na_cmp_corners(self):
- s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
-
- with pytest.raises(TypeError):
- s & datetime(2005, 1, 1)
-
- s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
- s[::2] = np.nan
-
- expected = Series(True, index=s.index)
- expected[::2] = False
- result = s & list(s)
- assert_series_equal(result, expected)
-
- d = DataFrame({'A': s})
- # TODO: Fix this exception - needs to be fixed! (see GH5035)
- # (previously this was a TypeError because series returned
- # NotImplemented
-
- # this is an alignment issue; these are equivalent
- # https://github.com/pandas-dev/pandas/issues/5284
-
- with pytest.raises(TypeError):
- d.__and__(s, axis='columns')
-
- with pytest.raises(TypeError):
- s & d
-
- # this is wrong as its not a boolean result
- # result = d.__and__(s,axis='index')
-
def test_operators_corner(self):
series = self.ts
@@ -934,62 +989,15 @@ def test_idxminmax_with_inf(self):
np.isnan(s.idxmax(skipna=False))
-class TestSeriesOperationsDataFrameCompat(object):
-
- def test_bool_ops_df_compat(self):
- # GH 1134
- s1 = pd.Series([True, False, True], index=list('ABC'), name='x')
- s2 = pd.Series([True, True, False], index=list('ABD'), name='x')
-
- exp = pd.Series([True, False, False, False],
- index=list('ABCD'), name='x')
- assert_series_equal(s1 & s2, exp)
- assert_series_equal(s2 & s1, exp)
-
- # True | np.nan => True
- exp = pd.Series([True, True, True, False],
- index=list('ABCD'), name='x')
- assert_series_equal(s1 | s2, exp)
- # np.nan | True => np.nan, filled with False
- exp = pd.Series([True, True, False, False],
- index=list('ABCD'), name='x')
- assert_series_equal(s2 | s1, exp)
-
- # DataFrame doesn't fill nan with False
- exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]},
- index=list('ABCD'))
- assert_frame_equal(s1.to_frame() & s2.to_frame(), exp)
- assert_frame_equal(s2.to_frame() & s1.to_frame(), exp)
-
- exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]},
- index=list('ABCD'))
- assert_frame_equal(s1.to_frame() | s2.to_frame(), exp)
- assert_frame_equal(s2.to_frame() | s1.to_frame(), exp)
-
- # different length
- s3 = pd.Series([True, False, True], index=list('ABC'), name='x')
- s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x')
-
- exp = pd.Series([True, False, True, False],
- index=list('ABCD'), name='x')
- assert_series_equal(s3 & s4, exp)
- assert_series_equal(s4 & s3, exp)
-
- # np.nan | True => np.nan, filled with False
- exp = pd.Series([True, True, True, False],
- index=list('ABCD'), name='x')
- assert_series_equal(s3 | s4, exp)
- # True | np.nan => True
- exp = pd.Series([True, True, True, True],
- index=list('ABCD'), name='x')
- assert_series_equal(s4 | s3, exp)
+class TestSeriesUnaryOps(object):
+ # __neg__, __pos__, __inv__
- exp = pd.DataFrame({'x': [True, False, True, np.nan]},
- index=list('ABCD'))
- assert_frame_equal(s3.to_frame() & s4.to_frame(), exp)
- assert_frame_equal(s4.to_frame() & s3.to_frame(), exp)
+ def test_neg(self):
+ ser = tm.makeStringSeries()
+ ser.name = 'series'
+ assert_series_equal(-ser, -1 * ser)
- exp = pd.DataFrame({'x': [True, True, True, np.nan]},
- index=list('ABCD'))
- assert_frame_equal(s3.to_frame() | s4.to_frame(), exp)
- assert_frame_equal(s4.to_frame() | s3.to_frame(), exp)
+ def test_invert(self):
+ ser = tm.makeStringSeries()
+ ser.name = 'series'
+ assert_series_equal(-(ser < 0), ~(ser < 0))
| Collect tests for logical ops |, &, ^.
Use fixtures in a couple of places in tests/arithmetic
Rename a couple of poorly named tests
No logic is changed, nothing is moved cross-module. | https://api.github.com/repos/pandas-dev/pandas/pulls/23029 | 2018-10-07T18:46:13Z | 2018-10-07T21:41:20Z | 2018-10-07T21:41:20Z | 2018-10-07T22:26:33Z |
Accepts integer/float string with units and raises when unit is ambiguous (2) | diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 4bc50695e1ecd..0c74551e9d2c1 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1303,6 +1303,7 @@ Deprecations
- :func:`pandas.api.types.is_period` is deprecated in favor of `pandas.api.types.is_period_dtype` (:issue:`23917`)
- :func:`pandas.api.types.is_datetimetz` is deprecated in favor of `pandas.api.types.is_datetime64tz` (:issue:`23917`)
- Creating a :class:`TimedeltaIndex`, :class:`DatetimeIndex`, or :class:`PeriodIndex` by passing range arguments `start`, `end`, and `periods` is deprecated in favor of :func:`timedelta_range`, :func:`date_range`, or :func:`period_range` (:issue:`23919`)
+- A timedelta passed a number string without a defined unit is deprecated (:issue:`12136`)
- Passing a string alias like ``'datetime64[ns, UTC]'`` as the ``unit`` parameter to :class:`DatetimeTZDtype` is deprecated. Use :class:`DatetimeTZDtype.construct_from_string` instead (:issue:`23990`).
- The ``skipna`` parameter of :meth:`~pandas.api.types.infer_dtype` will switch to ``True`` by default in a future version of pandas (:issue:`17066`, :issue:`24050`)
- In :meth:`Series.where` with Categorical data, providing an ``other`` that is not present in the categories is deprecated. Convert the categorical to a different dtype or add the ``other`` to the categories first (:issue:`24077`).
@@ -1562,6 +1563,7 @@ Timedelta
- Fixed bug in adding a :class:`DataFrame` with all-`timedelta64[ns]` dtypes to a :class:`DataFrame` with all-integer dtypes returning incorrect results instead of raising ``TypeError`` (:issue:`22696`)
- Bug in :class:`TimedeltaIndex` where adding a timezone-aware datetime scalar incorrectly returned a timezone-naive :class:`DatetimeIndex` (:issue:`23215`)
- Bug in :class:`TimedeltaIndex` where adding ``np.timedelta64('NaT')`` incorrectly returned an all-``NaT`` :class:`DatetimeIndex` instead of an all-``NaT`` :class:`TimedeltaIndex` (:issue:`23215`)
+- Bug in :class:`Timedelta` (and :func: `to_timedelta`) where passing a string of a pure number would not take the unit into account. Now raises for an ambiguous or duplicate unit specification.(:issue:`12136`)
- Bug in :class:`Timedelta` and :func:`to_timedelta()` have inconsistencies in supported unit string (:issue:`21762`)
- Bug in :class:`TimedeltaIndex` division where dividing by another :class:`TimedeltaIndex` raised ``TypeError`` instead of returning a :class:`Float64Index` (:issue:`23829`, :issue:`22631`)
- Bug in :class:`TimedeltaIndex` comparison operations where comparing against non-``Timedelta``-like objects would raise ``TypeError`` instead of returning all-``False`` for ``__eq__`` and all-``True`` for ``__ne__`` (:issue:`24056`)
diff --git a/pandas/_libs/tslibs/timedeltas.pxd b/pandas/_libs/tslibs/timedeltas.pxd
index c02a840281266..b5732ee670ee8 100644
--- a/pandas/_libs/tslibs/timedeltas.pxd
+++ b/pandas/_libs/tslibs/timedeltas.pxd
@@ -3,6 +3,7 @@
from numpy cimport int64_t
# Exposed for tslib, not intended for outside use.
+cpdef parse_timedelta_string(object ts, object specified_unit=*)
cdef int64_t cast_from_unit(object ts, object unit) except? -1
cpdef int64_t delta_to_nanoseconds(delta) except? -1
-cpdef convert_to_timedelta64(object ts, object unit)
+cpdef convert_to_timedelta64(object ts, object unit=*)
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 037e7de27adc3..828ea22d7bbf1 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -148,7 +148,7 @@ cpdef int64_t delta_to_nanoseconds(delta) except? -1:
raise TypeError(type(delta))
-cpdef convert_to_timedelta64(object ts, object unit):
+cpdef convert_to_timedelta64(object ts, object unit=None):
"""
Convert an incoming object to a timedelta64 if possible.
Before calling, unit must be standardized to avoid repeated unit conversion
@@ -162,6 +162,8 @@ cpdef convert_to_timedelta64(object ts, object unit):
Return an ns based int64
"""
+ if unit is None:
+ unit = 'ns'
if checknull_with_nat(ts):
return np.timedelta64(NPY_NAT)
elif isinstance(ts, Timedelta):
@@ -211,7 +213,7 @@ cpdef convert_to_timedelta64(object ts, object unit):
@cython.boundscheck(False)
@cython.wraparound(False)
-def array_to_timedelta64(object[:] values, unit='ns', errors='raise'):
+def array_to_timedelta64(object[:] values, unit=None, errors='raise'):
"""
Convert an ndarray to an array of timedeltas. If errors == 'coerce',
coerce non-convertible objects to NaT. Otherwise, raise.
@@ -234,7 +236,7 @@ def array_to_timedelta64(object[:] values, unit='ns', errors='raise'):
# this is where all of the error handling will take place.
try:
for i in range(n):
- result[i] = parse_timedelta_string(values[i])
+ result[i] = parse_timedelta_string(values[i], specified_unit=unit)
except:
unit = parse_timedelta_unit(unit)
for i in range(n):
@@ -310,7 +312,7 @@ cdef inline _decode_if_necessary(object ts):
return ts
-cdef inline parse_timedelta_string(object ts):
+cpdef inline parse_timedelta_string(object ts, specified_unit=None):
"""
Parse a regular format timedelta string. Return an int64_t (in ns)
or raise a ValueError on an invalid parse.
@@ -424,6 +426,17 @@ cdef inline parse_timedelta_string(object ts):
have_value = 1
have_dot = 0
+ # Consider units from outside
+ if not unit:
+ if specified_unit:
+ unit = [specified_unit]
+ else:
+ if specified_unit:
+ raise ValueError(
+ "units were doubly specified, both as an argument ({})"
+ " and inside string ({})".format(specified_unit, unit)
+ )
+
# we had a dot, but we have a fractional
# value since we have an unit
if have_dot and len(unit):
@@ -465,14 +478,17 @@ cdef inline parse_timedelta_string(object ts):
else:
raise ValueError("unit abbreviation w/o a number")
- # treat as nanoseconds
- # but only if we don't have anything else
+ # raise if we just have a number without units
else:
if have_value:
raise ValueError("have leftover units")
if len(number):
- r = timedelta_from_spec(number, frac, 'ns')
- result += timedelta_as_neg(r, neg)
+ warnings.warn(
+ "number string without units is deprecated and"
+ " will raise an exception in future versions. Considering as nanoseconds.",
+ FutureWarning
+ )
+ result = timedelta_from_spec(number, frac, 'ns')
return result
@@ -521,10 +537,12 @@ cpdef inline object parse_timedelta_unit(object unit):
----------
unit : an unit string
"""
- if unit is None:
- return 'ns'
- elif unit == 'M':
+
+ # Preserve unit if None, will be cast to nanoseconds
+ # later on at the proper functions
+ if unit is None or unit == 'M':
return unit
+
try:
return timedelta_abbrevs[unit.lower()]
except (KeyError, AttributeError):
@@ -1167,7 +1185,7 @@ class Timedelta(_Timedelta):
if len(value) > 0 and value[0] == 'P':
value = parse_iso_format_string(value)
else:
- value = parse_timedelta_string(value)
+ value = parse_timedelta_string(value, specified_unit=unit)
value = np.timedelta64(value)
elif PyDelta_Check(value):
value = convert_to_timedelta64(value, 'ns')
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 1ec37c9f228a6..c35b05ce9f9c2 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -874,6 +874,8 @@ def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"):
# treat as multiples of the given unit. If after converting to nanos,
# there are fractional components left, these are truncated
# (i.e. NOT rounded)
+ if unit is None:
+ unit = "ns"
mask = np.isnan(data)
coeff = np.timedelta64(1, unit) / np.timedelta64(1, 'ns')
data = (coeff * data).astype(np.int64).view('timedelta64[ns]')
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index 6bcf56c306e6a..c21c5b6930911 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -6,7 +6,10 @@
from pandas._libs import tslibs
from pandas._libs.tslibs.timedeltas import (
- convert_to_timedelta64, parse_timedelta_unit)
+ convert_to_timedelta64,
+ parse_timedelta_string,
+ parse_timedelta_unit,
+)
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
@@ -15,7 +18,7 @@
from pandas.core.arrays.timedeltas import sequence_to_td64ns
-def to_timedelta(arg, unit='ns', box=True, errors='raise'):
+def to_timedelta(arg, unit=None, box=True, errors='raise'):
"""
Convert argument to timedelta.
@@ -116,26 +119,30 @@ def to_timedelta(arg, unit='ns', box=True, errors='raise'):
box=box, errors=errors)
-def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'):
+def _coerce_scalar_to_timedelta_type(r, unit=None, box=True, errors='raise'):
"""Convert string 'r' to a timedelta object."""
try:
- result = convert_to_timedelta64(r, unit)
- except ValueError:
- if errors == 'raise':
- raise
- elif errors == 'ignore':
- return r
-
- # coerce
- result = pd.NaT
+ result = parse_timedelta_string(r, unit)
+ result = np.timedelta64(result)
+ except (ValueError, TypeError):
+ try:
+ result = convert_to_timedelta64(r, unit)
+ except ValueError:
+ if errors == 'raise':
+ raise
+ elif errors == 'ignore':
+ return r
+
+ # coerce
+ result = pd.NaT
if box:
result = tslibs.Timedelta(result)
return result
-def _convert_listlike(arg, unit='ns', box=True, errors='raise', name=None):
+def _convert_listlike(arg, unit=None, box=True, errors='raise', name=None):
"""Convert a list of objects to a timedelta index object."""
if isinstance(arg, (list, tuple)) or not hasattr(arg, 'dtype'):
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index f14ecae448723..8ad3362ab1b08 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -298,7 +298,7 @@ class TestFrameFlexArithmetic(object):
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range('2016-01-01', periods=10)
- tdi = pd.timedelta_range('1', periods=10)
+ tdi = pd.timedelta_range('1ns', periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index 97898dd8942f8..5bac697d5767f 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -234,7 +234,7 @@ def test_drop_duplicates(self):
'T', '2T', 'S', '-3S'])
def test_infer_freq(self, freq):
# GH#11018
- idx = pd.timedelta_range('1', freq=freq, periods=10)
+ idx = pd.timedelta_range('1ns', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
assert result.freq == freq
diff --git a/pandas/tests/indexes/timedeltas/test_partial_slicing.py b/pandas/tests/indexes/timedeltas/test_partial_slicing.py
index 62bf2a0b4a1cf..2134a1c9034a5 100644
--- a/pandas/tests/indexes/timedeltas/test_partial_slicing.py
+++ b/pandas/tests/indexes/timedeltas/test_partial_slicing.py
@@ -51,7 +51,8 @@ def test_partial_slice_high_reso(self):
assert result == s.iloc[1001]
def test_slice_with_negative_step(self):
- ts = Series(np.arange(20), timedelta_range('0', periods=20, freq='H'))
+ ts = Series(np.arange(20),
+ timedelta_range('0ns', periods=20, freq='H'))
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
@@ -76,7 +77,8 @@ def assert_slices_equivalent(l_slc, i_slc):
assert_slices_equivalent(SLC['7 hours':'15 hours':-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
- ts = Series(np.arange(20), timedelta_range('0', periods=20, freq='H'))
+ ts = Series(np.arange(20),
+ timedelta_range('0ns', periods=20, freq='H'))
with pytest.raises(ValueError, match='slice step cannot be zero'):
ts[::0]
with pytest.raises(ValueError, match='slice step cannot be zero'):
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index c78ab41d2fae4..430eca276b0ad 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -1400,7 +1400,7 @@ def test_format_timedelta_ticks_narrow(self):
'00:00:00.00000000{:d}'.format(2 * i)
for i in range(5)] + ['']
- rng = timedelta_range('0', periods=10, freq='ns')
+ rng = timedelta_range('0ns', periods=10, freq='ns')
df = DataFrame(np.random.randn(len(rng), 3), rng)
fig, ax = self.plt.subplots()
df.plot(fontsize=2, ax=ax)
@@ -1431,7 +1431,7 @@ def test_format_timedelta_ticks_wide(self):
expected_labels = expected_labels[1:-1]
expected_labels[-1] = ''
- rng = timedelta_range('0', periods=10, freq='1 d')
+ rng = timedelta_range('0ns', periods=10, freq='1 d')
df = DataFrame(np.random.randn(len(rng), 3), rng)
fig, ax = self.plt.subplots()
ax = df.plot(fontsize=2, ax=ax)
diff --git a/pandas/tests/scalar/timedelta/test_construction.py b/pandas/tests/scalar/timedelta/test_construction.py
index 880eca914749b..71e17344f02de 100644
--- a/pandas/tests/scalar/timedelta/test_construction.py
+++ b/pandas/tests/scalar/timedelta/test_construction.py
@@ -5,6 +5,7 @@
import pytest
from pandas import Timedelta, offsets, to_timedelta
+from pandas.util.testing import do_not_raise
def test_construction():
@@ -83,10 +84,6 @@ def test_construction():
with pytest.raises(ValueError):
Timedelta('10 days -1 h 1.5m 1s 3us')
- # no units specified
- with pytest.raises(ValueError):
- Timedelta('3.1415')
-
# invalid construction
with pytest.raises(ValueError, match="cannot construct a Timedelta"):
Timedelta()
@@ -208,3 +205,47 @@ def test_td_constructor_on_nanoseconds(constructed_td, conversion):
def test_td_constructor_value_error():
with pytest.raises(TypeError):
Timedelta(nanoseconds='abc')
+
+
+@pytest.mark.parametrize("value", [
+ 3.1415, # Number with decimals (original test)
+ 10, # Integer number, did not raise before
+])
+@pytest.mark.parametrize("str_unit, unit, expectation", [
+ # Expected case
+ ("",
+ "s",
+ do_not_raise),
+
+ # Units doubly defined
+ ("s",
+ "d",
+ pytest.raises(ValueError,
+ message="units were doubly specified, "
+ "both as an argument (d) and inside string (s)")
+ ),
+
+ # Units doubly defined (same)
+ ("s",
+ "s",
+ pytest.raises(ValueError,
+ message="units were doubly specified, "
+ "both as an argument (s) and inside string (s)")),
+
+ # No units
+ ("",
+ None,
+ pytest.warns(DeprecationWarning,
+ message="number string without units is deprecated and "
+ " will raise an exception in future versions. "
+ "Considering as nanoseconds.")),
+])
+def test_string_with_unit(value, str_unit, unit, expectation):
+ with expectation:
+ val_str = "{}{}".format(value, str_unit)
+ expected_td = Timedelta(value, unit=unit)
+
+ assert Timedelta(val_str, unit=unit) == expected_td
+ assert to_timedelta(val_str, unit=unit) == expected_td
+ assert all(to_timedelta([val_str, val_str], unit=unit) ==
+ to_timedelta([expected_td, expected_td]))
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index db0c848eaeb4b..9c49668363c0e 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -472,12 +472,9 @@ def test_short_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
- assert ct('10') == np.timedelta64(10, 'ns')
assert ct('10ns') == np.timedelta64(10, 'ns')
- assert ct('100') == np.timedelta64(100, 'ns')
assert ct('100ns') == np.timedelta64(100, 'ns')
- assert ct('1000') == np.timedelta64(1000, 'ns')
assert ct('1000ns') == np.timedelta64(1000, 'ns')
assert ct('1000NS') == np.timedelta64(1000, 'ns')
@@ -614,7 +611,7 @@ def test_implementation_limits(self):
def test_total_seconds_precision(self):
# GH 19458
assert Timedelta('30S').total_seconds() == 30.0
- assert Timedelta('0').total_seconds() == 0.0
+ assert Timedelta('0ns').total_seconds() == 0.0
assert Timedelta('-2S').total_seconds() == -2.0
assert Timedelta('5.324S').total_seconds() == 5.324
assert (Timedelta('30S').total_seconds() - 30.0) < 1e-20
diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py
index d36de931e2610..a9df403b0176d 100644
--- a/pandas/tests/util/test_hashing.py
+++ b/pandas/tests/util/test_hashing.py
@@ -18,7 +18,7 @@
Series([True, False, True] * 3),
Series(pd.date_range("20130101", periods=9)),
Series(pd.date_range("20130101", periods=9, tz="US/Eastern")),
- Series(pd.timedelta_range("2000", periods=9))])
+ Series(pd.timedelta_range("2000ns", periods=9))])
def series(request):
return request.param
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 2df43cd678764..ab4543a718590 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -43,6 +43,42 @@
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
+
+class NullContextManager(object):
+ """No-op context manager (does nothing).
+
+ Mainly used for defining ``do_not_raise`` context manager,
+ for pytest tests where we are required to parametrize on
+ whether we should raise something or not:
+
+ Example
+ -------
+
+ >>> import pytest
+ >>> from pytest import raises
+ >>> from pandas.util.testing import do_not_raise
+ >>> @pytest.mark.parametrize("input, expectation", [
+ ... (1, do_not_raise),
+ ... ("a", raises(ValueError))
+ ... ])
+ >>> def test_convert_number(input, expectation):
+ ... with expectation:
+ ... float(input)
+
+ """
+ def __init__(self, dummy_resource=None):
+ self.dummy_resource = dummy_resource
+
+ def __enter__(self):
+ return self.dummy_resource
+
+ def __exit__(self, *args):
+ pass
+
+
+do_not_raise = NullContextManager()
+
+
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
| Supersedes #21384.
- [x] closes #12136
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/23025 | 2018-10-07T16:26:53Z | 2019-05-03T05:38:27Z | null | 2019-05-03T05:38:28Z |
DOC: Upgraded Docstring pandas.DataFrame.dot | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f7f1855a4fabc..46651ee83d6f2 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -931,16 +931,70 @@ def __len__(self):
def dot(self, other):
"""
- Matrix multiplication with DataFrame or Series objects. Can also be
- called using `self @ other` in Python >= 3.5.
+ Compute the matrix mutiplication between the DataFrame and other.
+
+ This method computes the matrix product between the DataFrame and the
+ values of an other Series, DataFrame or a numpy array.
+
+ It can also be called using ``self @ other`` in Python >= 3.5.
Parameters
----------
- other : DataFrame or Series
+ other : Series, DataFrame or array-like
+ The other object to compute the matrix product with.
Returns
-------
- dot_product : DataFrame or Series
+ Series or DataFrame
+ If other is a Series, return the matrix product between self and
+ other as a Serie. If other is a DataFrame or a numpy.array, return
+ the matrix product of self and other in a DataFrame of a np.array.
+
+ See Also
+ --------
+ Series.dot: Similar method for Series.
+
+ Notes
+ -----
+ The dimensions of DataFrame and other must be compatible in order to
+ compute the matrix multiplication.
+
+ The dot method for Series computes the inner product, instead of the
+ matrix product here.
+
+ Examples
+ --------
+ Here we multiply a DataFrame with a Series.
+
+ >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
+ >>> s = pd.Series([1, 1, 2, 1])
+ >>> df.dot(s)
+ 0 -4
+ 1 5
+ dtype: int64
+
+ Here we multiply a DataFrame with another DataFrame.
+
+ >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
+ >>> df.dot(other)
+ 0 1
+ 0 1 4
+ 1 2 2
+
+ Note that the dot method give the same result as @
+
+ >>> df @ other
+ 0 1
+ 0 1 4
+ 1 2 2
+
+ The dot method works also if other is an np.array.
+
+ >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
+ >>> df.dot(arr)
+ 0 1
+ 0 1 4
+ 1 2 2
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/23024 | 2018-10-07T11:08:44Z | 2018-12-19T12:31:20Z | 2018-12-19T12:31:20Z | 2018-12-19T12:31:24Z |
REF: Fuse all the types | diff --git a/pandas/_libs/algos_common_helper.pxi.in b/pandas/_libs/algos_common_helper.pxi.in
index 6bcc735656c6b..b39b5eaced8fd 100644
--- a/pandas/_libs/algos_common_helper.pxi.in
+++ b/pandas/_libs/algos_common_helper.pxi.in
@@ -16,33 +16,30 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
{{py:
-# name, c_type, dest_type, dest_dtype
-dtypes = [('float64', 'float64_t', 'float64_t', 'np.float64'),
- ('float32', 'float32_t', 'float32_t', 'np.float32'),
- ('int8', 'int8_t', 'float32_t', 'np.float32'),
- ('int16', 'int16_t', 'float32_t', 'np.float32'),
- ('int32', 'int32_t', 'float64_t', 'np.float64'),
- ('int64', 'int64_t', 'float64_t', 'np.float64')]
+# name, c_type, dest_type
+dtypes = [('float64', 'float64_t', 'float64_t'),
+ ('float32', 'float32_t', 'float32_t'),
+ ('int8', 'int8_t', 'float32_t'),
+ ('int16', 'int16_t', 'float32_t'),
+ ('int32', 'int32_t', 'float64_t'),
+ ('int64', 'int64_t', 'float64_t')]
def get_dispatch(dtypes):
- for name, c_type, dest_type, dest_dtype, in dtypes:
-
- dest_type2 = dest_type
- dest_type = dest_type.replace('_t', '')
-
- yield name, c_type, dest_type, dest_type2, dest_dtype
+ for name, c_type, dest_type, in dtypes:
+ dest_name = dest_type[:-2] # i.e. strip "_t"
+ yield name, c_type, dest_type, dest_name
}}
-{{for name, c_type, dest_type, dest_type2, dest_dtype
+{{for name, c_type, dest_type, dest_name
in get_dispatch(dtypes)}}
@cython.boundscheck(False)
@cython.wraparound(False)
def diff_2d_{{name}}(ndarray[{{c_type}}, ndim=2] arr,
- ndarray[{{dest_type2}}, ndim=2] out,
+ ndarray[{{dest_type}}, ndim=2] out,
Py_ssize_t periods, int axis):
cdef:
Py_ssize_t i, j, sx, sy
@@ -84,9 +81,9 @@ def diff_2d_{{name}}(ndarray[{{c_type}}, ndim=2] arr,
out[i, j] = arr[i, j] - arr[i, j - periods]
-def put2d_{{name}}_{{dest_type}}(ndarray[{{c_type}}, ndim=2, cast=True] values,
+def put2d_{{name}}_{{dest_name}}(ndarray[{{c_type}}, ndim=2, cast=True] values,
ndarray[int64_t] indexer, Py_ssize_t loc,
- ndarray[{{dest_type2}}] out):
+ ndarray[{{dest_type}}] out):
cdef:
Py_ssize_t i, j, k
diff --git a/pandas/_libs/algos_rank_helper.pxi.in b/pandas/_libs/algos_rank_helper.pxi.in
index 130276ae0e73c..bb4aec75ed567 100644
--- a/pandas/_libs/algos_rank_helper.pxi.in
+++ b/pandas/_libs/algos_rank_helper.pxi.in
@@ -131,45 +131,20 @@ def rank_1d_{{dtype}}(object in_arr, ties_method='average',
argsorted = _as.astype('i8')
{{if dtype == 'object'}}
- for i in range(n):
- sum_ranks += i + 1
- dups += 1
- isnan = sorted_mask[i]
- val = util.get_value_at(sorted_data, i)
-
- if isnan and keep_na:
- ranks[argsorted[i]] = nan
- continue
- count += 1.0
-
- if (i == n - 1 or
- are_diff(util.get_value_at(sorted_data, i + 1), val) or
- i == non_na_idx):
- if tiebreak == TIEBREAK_AVERAGE:
- for j in range(i - dups + 1, i + 1):
- ranks[argsorted[j]] = sum_ranks / dups
- elif tiebreak == TIEBREAK_MIN:
- for j in range(i - dups + 1, i + 1):
- ranks[argsorted[j]] = i - dups + 2
- elif tiebreak == TIEBREAK_MAX:
- for j in range(i - dups + 1, i + 1):
- ranks[argsorted[j]] = i + 1
- elif tiebreak == TIEBREAK_FIRST:
- raise ValueError('first not supported for non-numeric data')
- elif tiebreak == TIEBREAK_FIRST_DESCENDING:
- for j in range(i - dups + 1, i + 1):
- ranks[argsorted[j]] = 2 * i - j - dups + 2
- elif tiebreak == TIEBREAK_DENSE:
- total_tie_count += 1
- for j in range(i - dups + 1, i + 1):
- ranks[argsorted[j]] = total_tie_count
- sum_ranks = dups = 0
+ if True:
{{else}}
with nogil:
+ {{endif}}
+ # TODO: why does the 2d version not have a nogil block?
for i in range(n):
sum_ranks += i + 1
dups += 1
+
+ {{if dtype == 'object'}}
+ val = util.get_value_at(sorted_data, i)
+ {{else}}
val = sorted_data[i]
+ {{endif}}
{{if dtype != 'uint64'}}
isnan = sorted_mask[i]
@@ -180,8 +155,14 @@ def rank_1d_{{dtype}}(object in_arr, ties_method='average',
count += 1.0
- if (i == n - 1 or sorted_data[i + 1] != val or
- i == non_na_idx):
+ {{if dtype == 'object'}}
+ if (i == n - 1 or
+ are_diff(util.get_value_at(sorted_data, i + 1), val) or
+ i == non_na_idx):
+ {{else}}
+ if (i == n - 1 or sorted_data[i + 1] != val or i == non_na_idx):
+ {{endif}}
+
if tiebreak == TIEBREAK_AVERAGE:
for j in range(i - dups + 1, i + 1):
ranks[argsorted[j]] = sum_ranks / dups
@@ -192,8 +173,13 @@ def rank_1d_{{dtype}}(object in_arr, ties_method='average',
for j in range(i - dups + 1, i + 1):
ranks[argsorted[j]] = i + 1
elif tiebreak == TIEBREAK_FIRST:
+ {{if dtype == 'object'}}
+ raise ValueError('first not supported for '
+ 'non-numeric data')
+ {{else}}
for j in range(i - dups + 1, i + 1):
ranks[argsorted[j]] = j + 1
+ {{endif}}
elif tiebreak == TIEBREAK_FIRST_DESCENDING:
for j in range(i - dups + 1, i + 1):
ranks[argsorted[j]] = 2 * i - j - dups + 2
@@ -202,7 +188,6 @@ def rank_1d_{{dtype}}(object in_arr, ties_method='average',
for j in range(i - dups + 1, i + 1):
ranks[argsorted[j]] = total_tie_count
sum_ranks = dups = 0
- {{endif}}
if pct:
if tiebreak == TIEBREAK_DENSE:
return ranks / total_tie_count
diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in
index 5b01117381a27..addbb2b3e8165 100644
--- a/pandas/_libs/groupby_helper.pxi.in
+++ b/pandas/_libs/groupby_helper.pxi.in
@@ -14,26 +14,22 @@ _int64_max = np.iinfo(np.int64).max
{{py:
-# name, c_type, dest_type, dest_dtype
-dtypes = [('float64', 'float64_t', 'float64_t', 'np.float64'),
- ('float32', 'float32_t', 'float32_t', 'np.float32')]
+# name, c_type
+dtypes = [('float64', 'float64_t'),
+ ('float32', 'float32_t')]
def get_dispatch(dtypes):
- for name, c_type, dest_type, dest_dtype in dtypes:
-
- dest_type2 = dest_type
- dest_type = dest_type.replace('_t', '')
-
- yield name, c_type, dest_type, dest_type2, dest_dtype
+ for name, c_type in dtypes:
+ yield name, c_type
}}
-{{for name, c_type, dest_type, dest_type2, dest_dtype in get_dispatch(dtypes)}}
+{{for name, c_type in get_dispatch(dtypes)}}
@cython.wraparound(False)
@cython.boundscheck(False)
-def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
+def group_add_{{name}}(ndarray[{{c_type}}, ndim=2] out,
ndarray[int64_t] counts,
ndarray[{{c_type}}, ndim=2] values,
ndarray[int64_t] labels,
@@ -43,8 +39,8 @@ def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
"""
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- {{dest_type2}} val, count
- ndarray[{{dest_type2}}, ndim=2] sumx, nobs
+ {{c_type}} val, count
+ ndarray[{{c_type}}, ndim=2] sumx, nobs
if not len(values) == len(labels):
raise AssertionError("len(index) != len(labels)")
@@ -80,7 +76,7 @@ def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
@cython.wraparound(False)
@cython.boundscheck(False)
-def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
+def group_prod_{{name}}(ndarray[{{c_type}}, ndim=2] out,
ndarray[int64_t] counts,
ndarray[{{c_type}}, ndim=2] values,
ndarray[int64_t] labels,
@@ -90,8 +86,8 @@ def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
"""
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- {{dest_type2}} val, count
- ndarray[{{dest_type2}}, ndim=2] prodx, nobs
+ {{c_type}} val, count
+ ndarray[{{c_type}}, ndim=2] prodx, nobs
if not len(values) == len(labels):
raise AssertionError("len(index) != len(labels)")
@@ -127,15 +123,15 @@ def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
@cython.wraparound(False)
@cython.boundscheck(False)
@cython.cdivision(True)
-def group_var_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
+def group_var_{{name}}(ndarray[{{c_type}}, ndim=2] out,
ndarray[int64_t] counts,
- ndarray[{{dest_type2}}, ndim=2] values,
+ ndarray[{{c_type}}, ndim=2] values,
ndarray[int64_t] labels,
Py_ssize_t min_count=-1):
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- {{dest_type2}} val, ct, oldmean
- ndarray[{{dest_type2}}, ndim=2] nobs, mean
+ {{c_type}} val, ct, oldmean
+ ndarray[{{c_type}}, ndim=2] nobs, mean
assert min_count == -1, "'min_count' only used in add and prod"
@@ -179,15 +175,15 @@ def group_var_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
@cython.wraparound(False)
@cython.boundscheck(False)
-def group_mean_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
+def group_mean_{{name}}(ndarray[{{c_type}}, ndim=2] out,
ndarray[int64_t] counts,
- ndarray[{{dest_type2}}, ndim=2] values,
+ ndarray[{{c_type}}, ndim=2] values,
ndarray[int64_t] labels,
Py_ssize_t min_count=-1):
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- {{dest_type2}} val, count
- ndarray[{{dest_type2}}, ndim=2] sumx, nobs
+ {{c_type}} val, count
+ ndarray[{{c_type}}, ndim=2] sumx, nobs
assert min_count == -1, "'min_count' only used in add and prod"
@@ -224,9 +220,9 @@ def group_mean_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
@cython.wraparound(False)
@cython.boundscheck(False)
-def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
+def group_ohlc_{{name}}(ndarray[{{c_type}}, ndim=2] out,
ndarray[int64_t] counts,
- ndarray[{{dest_type2}}, ndim=2] values,
+ ndarray[{{c_type}}, ndim=2] values,
ndarray[int64_t] labels,
Py_ssize_t min_count=-1):
"""
@@ -234,7 +230,7 @@ def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
"""
cdef:
Py_ssize_t i, j, N, K, lab
- {{dest_type2}} val, count
+ {{c_type}} val, count
Py_ssize_t ngroups = len(counts)
assert min_count == -1, "'min_count' only used in add and prod"
@@ -278,26 +274,26 @@ def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
{{py:
-# name, c_type, dest_type2, nan_val
-dtypes = [('float64', 'float64_t', 'float64_t', 'NAN'),
- ('float32', 'float32_t', 'float32_t', 'NAN'),
- ('int64', 'int64_t', 'int64_t', 'iNaT'),
- ('object', 'object', 'object', 'NAN')]
+# name, c_type, nan_val
+dtypes = [('float64', 'float64_t', 'NAN'),
+ ('float32', 'float32_t', 'NAN'),
+ ('int64', 'int64_t', 'iNaT'),
+ ('object', 'object', 'NAN')]
def get_dispatch(dtypes):
- for name, c_type, dest_type2, nan_val in dtypes:
+ for name, c_type, nan_val in dtypes:
- yield name, c_type, dest_type2, nan_val
+ yield name, c_type, nan_val
}}
-{{for name, c_type, dest_type2, nan_val in get_dispatch(dtypes)}}
+{{for name, c_type, nan_val in get_dispatch(dtypes)}}
@cython.wraparound(False)
@cython.boundscheck(False)
-def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
+def group_last_{{name}}(ndarray[{{c_type}}, ndim=2] out,
ndarray[int64_t] counts,
ndarray[{{c_type}}, ndim=2] values,
ndarray[int64_t] labels,
@@ -307,8 +303,8 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
"""
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- {{dest_type2}} val
- ndarray[{{dest_type2}}, ndim=2] resx
+ {{c_type}} val
+ ndarray[{{c_type}}, ndim=2] resx
ndarray[int64_t, ndim=2] nobs
assert min_count == -1, "'min_count' only used in add and prod"
@@ -354,7 +350,7 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
@cython.wraparound(False)
@cython.boundscheck(False)
-def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
+def group_nth_{{name}}(ndarray[{{c_type}}, ndim=2] out,
ndarray[int64_t] counts,
ndarray[{{c_type}}, ndim=2] values,
ndarray[int64_t] labels, int64_t rank,
@@ -364,8 +360,8 @@ def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
"""
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- {{dest_type2}} val
- ndarray[{{dest_type2}}, ndim=2] resx
+ {{c_type}} val
+ ndarray[{{c_type}}, ndim=2] resx
ndarray[int64_t, ndim=2] nobs
assert min_count == -1, "'min_count' only used in add and prod"
@@ -473,7 +469,7 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
# with mask, without obfuscating location of missing data
# in values array
masked_vals = np.array(values[:, 0], copy=True)
- {{if name=='int64'}}
+ {{if name == 'int64'}}
mask = (masked_vals == {{nan_val}}).astype(np.uint8)
{{else}}
mask = np.isnan(masked_vals).astype(np.uint8)
@@ -597,41 +593,31 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
{{endfor}}
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
# group_min, group_max
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
-{{py:
-
-# name, c_type, dest_type2, nan_val
-dtypes = [('float64', 'float64_t', 'NAN', 'np.inf'),
- ('float32', 'float32_t', 'NAN', 'np.inf'),
- ('int64', 'int64_t', 'iNaT', '_int64_max')]
-
-def get_dispatch(dtypes):
-
- for name, dest_type2, nan_val, inf_val in dtypes:
- yield name, dest_type2, nan_val, inf_val
-}}
-
-
-{{for name, dest_type2, nan_val, inf_val in get_dispatch(dtypes)}}
+# TODO: consider implementing for more dtypes
+ctypedef fused groupby_t:
+ float64_t
+ float32_t
+ int64_t
@cython.wraparound(False)
@cython.boundscheck(False)
-def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[{{dest_type2}}, ndim=2] values,
- ndarray[int64_t] labels,
- Py_ssize_t min_count=-1):
+def group_max(ndarray[groupby_t, ndim=2] out,
+ ndarray[int64_t] counts,
+ ndarray[groupby_t, ndim=2] values,
+ ndarray[int64_t] labels,
+ Py_ssize_t min_count=-1):
"""
Only aggregates on axis=0
"""
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- {{dest_type2}} val, count
- ndarray[{{dest_type2}}, ndim=2] maxx, nobs
+ groupby_t val, count, nan_val
+ ndarray[groupby_t, ndim=2] maxx, nobs
assert min_count == -1, "'min_count' only used in add and prod"
@@ -641,7 +627,13 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
nobs = np.zeros_like(out)
maxx = np.empty_like(out)
- maxx.fill(-{{inf_val}})
+ if groupby_t is int64_t:
+ # Note: evaluated at compile-time
+ maxx.fill(-_int64_max)
+ nan_val = iNaT
+ else:
+ maxx.fill(-np.inf)
+ nan_val = NAN
N, K = (<object> values).shape
@@ -656,37 +648,44 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
val = values[i, j]
# not nan
- {{if name == 'int64'}}
- if val != {{nan_val}}:
- {{else}}
- if val == val and val != {{nan_val}}:
- {{endif}}
- nobs[lab, j] += 1
- if val > maxx[lab, j]:
- maxx[lab, j] = val
+ if groupby_t is int64_t:
+ if val != nan_val:
+ nobs[lab, j] += 1
+ if val > maxx[lab, j]:
+ maxx[lab, j] = val
+ else:
+ if val == val and val != nan_val:
+ nobs[lab, j] += 1
+ if val > maxx[lab, j]:
+ maxx[lab, j] = val
for i in range(ncounts):
for j in range(K):
if nobs[i, j] == 0:
- out[i, j] = {{nan_val}}
+ out[i, j] = nan_val
else:
out[i, j] = maxx[i, j]
+group_max_float64 = group_max["float64_t"]
+group_max_float32 = group_max["float32_t"]
+group_max_int64 = group_max["int64_t"]
+
+
@cython.wraparound(False)
@cython.boundscheck(False)
-def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[{{dest_type2}}, ndim=2] values,
- ndarray[int64_t] labels,
- Py_ssize_t min_count=-1):
+def group_min(ndarray[groupby_t, ndim=2] out,
+ ndarray[int64_t] counts,
+ ndarray[groupby_t, ndim=2] values,
+ ndarray[int64_t] labels,
+ Py_ssize_t min_count=-1):
"""
Only aggregates on axis=0
"""
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- {{dest_type2}} val, count
- ndarray[{{dest_type2}}, ndim=2] minx, nobs
+ groupby_t val, count, nan_val
+ ndarray[groupby_t, ndim=2] minx, nobs
assert min_count == -1, "'min_count' only used in add and prod"
@@ -696,7 +695,12 @@ def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
nobs = np.zeros_like(out)
minx = np.empty_like(out)
- minx.fill({{inf_val}})
+ if groupby_t is int64_t:
+ minx.fill(_int64_max)
+ nan_val = iNaT
+ else:
+ minx.fill(np.inf)
+ nan_val = NAN
N, K = (<object> values).shape
@@ -711,41 +715,51 @@ def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
val = values[i, j]
# not nan
- {{if name == 'int64'}}
- if val != {{nan_val}}:
- {{else}}
- if val == val and val != {{nan_val}}:
- {{endif}}
- nobs[lab, j] += 1
- if val < minx[lab, j]:
- minx[lab, j] = val
+ if groupby_t is int64_t:
+ if val != nan_val:
+ nobs[lab, j] += 1
+ if val < minx[lab, j]:
+ minx[lab, j] = val
+ else:
+ if val == val and val != nan_val:
+ nobs[lab, j] += 1
+ if val < minx[lab, j]:
+ minx[lab, j] = val
for i in range(ncounts):
for j in range(K):
if nobs[i, j] == 0:
- out[i, j] = {{nan_val}}
+ out[i, j] = nan_val
else:
out[i, j] = minx[i, j]
+group_min_float64 = group_min["float64_t"]
+group_min_float32 = group_min["float32_t"]
+group_min_int64 = group_min["int64_t"]
+
+
@cython.boundscheck(False)
@cython.wraparound(False)
-def group_cummin_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
- ndarray[{{dest_type2}}, ndim=2] values,
- ndarray[int64_t] labels,
- bint is_datetimelike):
+def group_cummin(ndarray[groupby_t, ndim=2] out,
+ ndarray[groupby_t, ndim=2] values,
+ ndarray[int64_t] labels,
+ bint is_datetimelike):
"""
Only transforms on axis=0
"""
cdef:
Py_ssize_t i, j, N, K, size
- {{dest_type2}} val, mval
- ndarray[{{dest_type2}}, ndim=2] accum
+ groupby_t val, mval
+ ndarray[groupby_t, ndim=2] accum
int64_t lab
N, K = (<object> values).shape
accum = np.empty_like(values)
- accum.fill({{inf_val}})
+ if groupby_t is int64_t:
+ accum.fill(_int64_max)
+ else:
+ accum.fill(np.inf)
with nogil:
for i in range(N):
@@ -757,37 +771,48 @@ def group_cummin_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
val = values[i, j]
# val = nan
- {{if name == 'int64'}}
- if is_datetimelike and val == {{nan_val}}:
- out[i, j] = {{nan_val}}
+ if groupby_t is int64_t:
+ if is_datetimelike and val == iNaT:
+ out[i, j] = iNaT
+ else:
+ mval = accum[lab, j]
+ if val < mval:
+ accum[lab, j] = mval = val
+ out[i, j] = mval
else:
- {{else}}
- if val == val:
- {{endif}}
- mval = accum[lab, j]
- if val < mval:
- accum[lab, j] = mval = val
- out[i, j] = mval
+ if val == val:
+ mval = accum[lab, j]
+ if val < mval:
+ accum[lab, j] = mval = val
+ out[i, j] = mval
+
+
+group_cummin_float64 = group_cummin["float64_t"]
+group_cummin_float32 = group_cummin["float32_t"]
+group_cummin_int64 = group_cummin["int64_t"]
@cython.boundscheck(False)
@cython.wraparound(False)
-def group_cummax_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
- ndarray[{{dest_type2}}, ndim=2] values,
- ndarray[int64_t] labels,
- bint is_datetimelike):
+def group_cummax(ndarray[groupby_t, ndim=2] out,
+ ndarray[groupby_t, ndim=2] values,
+ ndarray[int64_t] labels,
+ bint is_datetimelike):
"""
Only transforms on axis=0
"""
cdef:
Py_ssize_t i, j, N, K, size
- {{dest_type2}} val, mval
- ndarray[{{dest_type2}}, ndim=2] accum
+ groupby_t val, mval
+ ndarray[groupby_t, ndim=2] accum
int64_t lab
N, K = (<object> values).shape
accum = np.empty_like(values)
- accum.fill(-{{inf_val}})
+ if groupby_t is int64_t:
+ accum.fill(-_int64_max)
+ else:
+ accum.fill(-np.inf)
with nogil:
for i in range(N):
@@ -798,16 +823,22 @@ def group_cummax_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
for j in range(K):
val = values[i, j]
- {{if name == 'int64'}}
- if is_datetimelike and val == {{nan_val}}:
- out[i, j] = {{nan_val}}
+ if groupby_t is int64_t:
+ if is_datetimelike and val == iNaT:
+ out[i, j] = iNaT
+ else:
+ mval = accum[lab, j]
+ if val > mval:
+ accum[lab, j] = mval = val
+ out[i, j] = mval
else:
- {{else}}
- if val == val:
- {{endif}}
- mval = accum[lab, j]
- if val > mval:
- accum[lab, j] = mval = val
- out[i, j] = mval
+ if val == val:
+ mval = accum[lab, j]
+ if val > mval:
+ accum[lab, j] = mval = val
+ out[i, j] = mval
-{{endfor}}
+
+group_cummax_float64 = group_cummax["float64_t"]
+group_cummax_float32 = group_cummax["float32_t"]
+group_cummax_int64 = group_cummax["int64_t"]
diff --git a/pandas/_libs/join_func_helper.pxi.in b/pandas/_libs/join_func_helper.pxi.in
index 72f24762838b4..b7f604d2fc951 100644
--- a/pandas/_libs/join_func_helper.pxi.in
+++ b/pandas/_libs/join_func_helper.pxi.in
@@ -210,34 +210,34 @@ def asof_join_nearest_{{on_dtype}}_by_{{by_dtype}}(
{{endfor}}
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
# asof_join
-#----------------------------------------------------------------------
-
-{{py:
-
-# on_dtype
-dtypes = ['uint8_t', 'uint16_t', 'uint32_t', 'uint64_t',
- 'int8_t', 'int16_t', 'int32_t', 'int64_t',
- 'float', 'double']
-
-}}
-
-{{for on_dtype in dtypes}}
-
-
-def asof_join_backward_{{on_dtype}}(
- ndarray[{{on_dtype}}] left_values,
- ndarray[{{on_dtype}}] right_values,
- bint allow_exact_matches=1,
- tolerance=None):
+# ----------------------------------------------------------------------
+
+ctypedef fused asof_t:
+ uint8_t
+ uint16_t
+ uint32_t
+ uint64_t
+ int8_t
+ int16_t
+ int32_t
+ int64_t
+ float
+ double
+
+
+def asof_join_backward(ndarray[asof_t] left_values,
+ ndarray[asof_t] right_values,
+ bint allow_exact_matches=1,
+ tolerance=None):
cdef:
Py_ssize_t left_pos, right_pos, left_size, right_size
ndarray[int64_t] left_indexer, right_indexer
bint has_tolerance = 0
- {{on_dtype}} tolerance_ = 0
- {{on_dtype}} diff = 0
+ asof_t tolerance_ = 0
+ asof_t diff = 0
# if we are using tolerance, set our objects
if tolerance is not None:
@@ -280,18 +280,29 @@ def asof_join_backward_{{on_dtype}}(
return left_indexer, right_indexer
-def asof_join_forward_{{on_dtype}}(
- ndarray[{{on_dtype}}] left_values,
- ndarray[{{on_dtype}}] right_values,
- bint allow_exact_matches=1,
- tolerance=None):
+asof_join_backward_uint8_t = asof_join_backward["uint8_t"]
+asof_join_backward_uint16_t = asof_join_backward["uint16_t"]
+asof_join_backward_uint32_t = asof_join_backward["uint32_t"]
+asof_join_backward_uint64_t = asof_join_backward["uint64_t"]
+asof_join_backward_int8_t = asof_join_backward["int8_t"]
+asof_join_backward_int16_t = asof_join_backward["int16_t"]
+asof_join_backward_int32_t = asof_join_backward["int32_t"]
+asof_join_backward_int64_t = asof_join_backward["int64_t"]
+asof_join_backward_float = asof_join_backward["float"]
+asof_join_backward_double = asof_join_backward["double"]
+
+
+def asof_join_forward(ndarray[asof_t] left_values,
+ ndarray[asof_t] right_values,
+ bint allow_exact_matches=1,
+ tolerance=None):
cdef:
Py_ssize_t left_pos, right_pos, left_size, right_size
ndarray[int64_t] left_indexer, right_indexer
bint has_tolerance = 0
- {{on_dtype}} tolerance_ = 0
- {{on_dtype}} diff = 0
+ asof_t tolerance_ = 0
+ asof_t diff = 0
# if we are using tolerance, set our objects
if tolerance is not None:
@@ -335,16 +346,27 @@ def asof_join_forward_{{on_dtype}}(
return left_indexer, right_indexer
-def asof_join_nearest_{{on_dtype}}(
- ndarray[{{on_dtype}}] left_values,
- ndarray[{{on_dtype}}] right_values,
- bint allow_exact_matches=1,
- tolerance=None):
+asof_join_forward_uint8_t = asof_join_forward["uint8_t"]
+asof_join_forward_uint16_t = asof_join_forward["uint16_t"]
+asof_join_forward_uint32_t = asof_join_forward["uint32_t"]
+asof_join_forward_uint64_t = asof_join_forward["uint64_t"]
+asof_join_forward_int8_t = asof_join_forward["int8_t"]
+asof_join_forward_int16_t = asof_join_forward["int16_t"]
+asof_join_forward_int32_t = asof_join_forward["int32_t"]
+asof_join_forward_int64_t = asof_join_forward["int64_t"]
+asof_join_forward_float = asof_join_forward["float"]
+asof_join_forward_double = asof_join_forward["double"]
+
+
+def asof_join_nearest(ndarray[asof_t] left_values,
+ ndarray[asof_t] right_values,
+ bint allow_exact_matches=1,
+ tolerance=None):
cdef:
Py_ssize_t left_size, right_size, i
ndarray[int64_t] left_indexer, right_indexer, bli, bri, fli, fri
- {{on_dtype}} bdiff, fdiff
+ asof_t bdiff, fdiff
left_size = len(left_values)
right_size = len(right_values)
@@ -353,10 +375,10 @@ def asof_join_nearest_{{on_dtype}}(
right_indexer = np.empty(left_size, dtype=np.int64)
# search both forward and backward
- bli, bri = asof_join_backward_{{on_dtype}}(left_values, right_values,
- allow_exact_matches, tolerance)
- fli, fri = asof_join_forward_{{on_dtype}}(left_values, right_values,
- allow_exact_matches, tolerance)
+ bli, bri = asof_join_backward(left_values, right_values,
+ allow_exact_matches, tolerance)
+ fli, fri = asof_join_forward(left_values, right_values,
+ allow_exact_matches, tolerance)
for i in range(len(bri)):
# choose timestamp from right with smaller difference
@@ -370,4 +392,14 @@ def asof_join_nearest_{{on_dtype}}(
return left_indexer, right_indexer
-{{endfor}}
+
+asof_join_nearest_uint8_t = asof_join_nearest["uint8_t"]
+asof_join_nearest_uint16_t = asof_join_nearest["uint16_t"]
+asof_join_nearest_uint32_t = asof_join_nearest["uint32_t"]
+asof_join_nearest_uint64_t = asof_join_nearest["uint64_t"]
+asof_join_nearest_int8_t = asof_join_nearest["int8_t"]
+asof_join_nearest_int16_t = asof_join_nearest["int16_t"]
+asof_join_nearest_int32_t = asof_join_nearest["int32_t"]
+asof_join_nearest_int64_t = asof_join_nearest["int64_t"]
+asof_join_nearest_float = asof_join_nearest["float"]
+asof_join_nearest_double = asof_join_nearest["double"]
diff --git a/pandas/_libs/sparse_op_helper.pxi.in b/pandas/_libs/sparse_op_helper.pxi.in
index 2843a3cf7dd28..d02a985de1d61 100644
--- a/pandas/_libs/sparse_op_helper.pxi.in
+++ b/pandas/_libs/sparse_op_helper.pxi.in
@@ -8,18 +8,12 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
# Sparse op
#----------------------------------------------------------------------
-{{py:
-
-# dtype, float_group
-dtypes = [('float64', True), ('int64', False)]
-
-}}
+ctypedef fused sparse_t:
+ float64_t
+ int64_t
-{{for dtype, float_group in dtypes}}
-{{if float_group}}
-
-cdef inline {{dtype}}_t __div_{{dtype}}({{dtype}}_t a, {{dtype}}_t b):
+cdef inline float64_t __div__(sparse_t a, sparse_t b):
if b == 0:
if a > 0:
return INF
@@ -30,63 +24,41 @@ cdef inline {{dtype}}_t __div_{{dtype}}({{dtype}}_t a, {{dtype}}_t b):
else:
return float(a) / b
-cdef inline {{dtype}}_t __truediv_{{dtype}}({{dtype}}_t a, {{dtype}}_t b):
- return __div_{{dtype}}(a, b)
-cdef inline {{dtype}}_t __floordiv_{{dtype}}({{dtype}}_t a, {{dtype}}_t b):
- if b == 0:
- # numpy >= 1.11 returns NaN
- # for a // 0, rather than +-inf
- if _np_version_under1p11:
- if a > 0:
- return INF
- elif a < 0:
- return -INF
- return NaN
- else:
- return a // b
+cdef inline float64_t __truediv__(sparse_t a, sparse_t b):
+ return __div__(a, b)
-cdef inline {{dtype}}_t __mod_{{dtype}}({{dtype}}_t a, {{dtype}}_t b):
- if b == 0:
- return NaN
- else:
- return a % b
-
-{{else}}
-cdef inline float64_t __div_{{dtype}}({{dtype}}_t a, {{dtype}}_t b):
+cdef inline sparse_t __mod__(sparse_t a, sparse_t b):
if b == 0:
- if a > 0:
- return INF
- elif a < 0:
- return -INF
- else:
+ if sparse_t is float64_t:
return NaN
+ else:
+ return 0
else:
- return float(a) / b
+ return a % b
-cdef inline float64_t __truediv_{{dtype}}({{dtype}}_t a, {{dtype}}_t b):
- return __div_{{dtype}}(a, b)
-cdef inline {{dtype}}_t __floordiv_{{dtype}}({{dtype}}_t a, {{dtype}}_t b):
+cdef inline sparse_t __floordiv__(sparse_t a, sparse_t b):
if b == 0:
- return 0
+ if sparse_t is float64_t:
+ # numpy >= 1.11 returns NaN
+ # for a // 0, rather than +-inf
+ if _np_version_under1p11:
+ if a > 0:
+ return INF
+ elif a < 0:
+ return -INF
+ return NaN
+ else:
+ return 0
else:
return a // b
-cdef inline {{dtype}}_t __mod_{{dtype}}({{dtype}}_t a, {{dtype}}_t b):
- if b == 0:
- return 0
- else:
- return a % b
-{{endif}}
-
-{{endfor}}
-
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
# sparse array op
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
{{py:
@@ -106,10 +78,10 @@ def get_op(tup):
ops_dict = {'add': '{0} + {1}',
'sub': '{0} - {1}',
'mul': '{0} * {1}',
- 'div': '__div_{2}({0}, {1})',
- 'mod': '__mod_{2}({0}, {1})',
- 'truediv': '__truediv_{2}({0}, {1})',
- 'floordiv': '__floordiv_{2}({0}, {1})',
+ 'div': '__div__({0}, {1})',
+ 'mod': '__mod__({0}, {1})',
+ 'truediv': '__truediv__({0}, {1})',
+ 'floordiv': '__floordiv__({0}, {1})',
'pow': '{0} ** {1}',
'eq': '{0} == {1}',
'ne': '{0} != {1}',
| Everything is passing locally, just want to run this through the CI for good measure before continuing down this path. | https://api.github.com/repos/pandas-dev/pandas/pulls/23022 | 2018-10-06T23:52:18Z | 2018-10-17T12:36:19Z | 2018-10-17T12:36:19Z | 2018-10-17T16:00:00Z |
DOC: Clarify rolling min_periods default value GH21489 | diff --git a/pandas/core/window.py b/pandas/core/window.py
index 4281d66a640e3..ea0ec79d655fb 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -462,7 +462,8 @@ class Window(_Window):
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
- this will default to 1.
+ `min_periods` will default to 1. Otherwise, `min_periods` will default
+ to the size of the window.
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
| - [ ] closes #21489
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew Clarifies the documentation of the default value for the min_periods argument of the rolling function.
| https://api.github.com/repos/pandas-dev/pandas/pulls/23021 | 2018-10-06T21:50:54Z | 2018-10-07T21:46:08Z | 2018-10-07T21:46:08Z | 2018-10-07T21:46:13Z |
PERF: only output an html id if a style is applied | diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index ad6ad5bcaf309..3b3238586b310 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -64,6 +64,11 @@ class Styler(object):
a unique identifier to avoid CSS collisions; generated automatically
caption: str, default None
caption to attach to the table
+ cell_ids: bool, default True
+ If True, each cell will have an ``id`` attribute in their HTML tag.
+ The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
+ where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
+ number and ``<num_col>`` is the column number.
Attributes
----------
@@ -112,7 +117,7 @@ class Styler(object):
template = env.get_template("html.tpl")
def __init__(self, data, precision=None, table_styles=None, uuid=None,
- caption=None, table_attributes=None):
+ caption=None, table_attributes=None, cell_ids=True):
self.ctx = defaultdict(list)
self._todo = []
@@ -136,6 +141,7 @@ def __init__(self, data, precision=None, table_styles=None, uuid=None,
self.table_attributes = table_attributes
self.hidden_index = False
self.hidden_columns = []
+ self.cell_ids = cell_ids
# display_funcs maps (row, col) -> formatting function
@@ -306,14 +312,16 @@ def format_attr(pair):
cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
formatter = self._display_funcs[(r, c)]
value = self.data.iloc[r, c]
- row_es.append({
- "type": "td",
- "value": value,
- "class": " ".join(cs),
- "id": "_".join(cs[1:]),
- "display_value": formatter(value),
- "is_visible": (c not in hidden_columns)
- })
+ row_dict = {"type": "td",
+ "value": value,
+ "class": " ".join(cs),
+ "display_value": formatter(value),
+ "is_visible": (c not in hidden_columns)}
+ # only add an id if the cell has a style
+ if (self.cell_ids or
+ not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')):
+ row_dict["id"] = "_".join(cs[1:])
+ row_es.append(row_dict)
props = []
for x in ctx[r, c]:
# have to handle empty styles like ['']
diff --git a/pandas/io/formats/templates/html.tpl b/pandas/io/formats/templates/html.tpl
index 706db1ecdd961..01ecde7d081f5 100644
--- a/pandas/io/formats/templates/html.tpl
+++ b/pandas/io/formats/templates/html.tpl
@@ -50,17 +50,17 @@
{%- endblock thead %}
{%- block tbody %}
<tbody>
- {%- block before_rows %}{%- endblock before_rows %}
- {%- for r in body %}
- {%- block tr scoped %}
- <tr>
- {%- for c in r %}
- {%- if c.is_visible != False %}
- <{{ c.type }} id="T_{{ uuid }}{{ c.id }}" class="{{ c.class }}" {{ c.attributes|join(" ") }}>{{ c.display_value }}</{{ c.type }}>
- {%- endif %}
- {%- endfor %}
- </tr>
- {%- endblock tr %}
+ {% block before_rows %}{% endblock before_rows %}
+ {% for r in body %}
+ {% block tr scoped %}
+ <tr>
+ {% for c in r %}
+ {% if c.is_visible != False %}
+ <{{ c.type }} {% if c.id is defined -%} id="T_{{ uuid }}{{ c.id }}" {%- endif %} class="{{ c.class }}" {{ c.attributes|join(" ") }}>{{ c.display_value }}</{{ c.type }}>
+ {% endif %}
+ {%- endfor %}
+ </tr>
+ {% endblock tr %}
{%- endfor %}
{%- block after_rows %}{%- endblock after_rows %}
</tbody>
| - [x] closes #20695
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Greatly reduces the amount of `id` tag in the cells by only assigning one when a style is applied to that cell. If that is the not correct approach for solving #20695, I am open to suggestions.
Also I was confused by the `%-` tags to handle the whitespaces. Is there a reason the default options to handle whitespace are not used? | https://api.github.com/repos/pandas-dev/pandas/pulls/23019 | 2018-10-06T18:44:09Z | 2018-10-14T16:10:09Z | 2018-10-14T16:10:09Z | 2018-10-14T20:52:51Z |
TST: further clean up of frame/test_analytics | diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index b0b9f2815cbb9..5327e3fcbea76 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -25,22 +25,44 @@
import pandas.util._test_decorators as td
-def _check_stat_op(name, alternative, main_frame, float_frame,
- float_string_frame, has_skipna=True,
- has_numeric_only=False, check_dtype=True,
- check_dates=False, check_less_precise=False,
- skipna_alternative=None):
-
- f = getattr(main_frame, name)
+def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
+ check_dtype=True, check_dates=False,
+ check_less_precise=False, skipna_alternative=None):
+ """
+ Check that operator opname works as advertised on frame
+
+ Parameters
+ ----------
+ opname : string
+ Name of the operator to test on frame
+ alternative : function
+ Function that opname is tested against; i.e. "frame.opname()" should
+ equal "alternative(frame)".
+ frame : DataFrame
+ The object that the tests are executed on
+ has_skipna : bool, default True
+ Whether the method "opname" has the kwarg "skip_na"
+ check_dtype : bool, default True
+ Whether the dtypes of the result of "frame.opname()" and
+ "alternative(frame)" should be checked.
+ check_dates : bool, default false
+ Whether opname should be tested on a Datetime Series
+ check_less_precise : bool, default False
+ Whether results should only be compared approximately;
+ passed on to tm.assert_series_equal
+ skipna_alternative : function, default None
+ NaN-safe version of alternative
+ """
+
+ f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
- _f = getattr(df, name)
- result = _f()
+ result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
- result = getattr(df, name)()
+ result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
@@ -52,11 +74,11 @@ def wrapper(x):
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
- tm.assert_series_equal(result0, main_frame.apply(wrapper),
+ tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
- tm.assert_series_equal(result1, main_frame.apply(wrapper, axis=1),
+ tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
@@ -64,49 +86,83 @@ def wrapper(x):
result0 = f(axis=0)
result1 = f(axis=1)
- tm.assert_series_equal(result0, main_frame.apply(skipna_wrapper),
+ tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
- if name in ['sum', 'prod']:
- expected = main_frame.apply(skipna_wrapper, axis=1)
+
+ if opname in ['sum', 'prod']:
+ expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
- lcd_dtype = main_frame.values.dtype
+ lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
tm.assert_raises_regex(ValueError, 'No axis named 2', f, axis=2)
- # make sure works on mixed-type frame
- getattr(float_string_frame, name)(axis=0)
- getattr(float_string_frame, name)(axis=1)
-
- if has_numeric_only:
- getattr(float_string_frame, name)(axis=0, numeric_only=True)
- getattr(float_string_frame, name)(axis=1, numeric_only=True)
- getattr(float_frame, name)(axis=0, numeric_only=False)
- getattr(float_frame, name)(axis=1, numeric_only=False)
# all NA case
if has_skipna:
- all_na = float_frame * np.NaN
- r0 = getattr(all_na, name)(axis=0)
- r1 = getattr(all_na, name)(axis=1)
- if name in ['sum', 'prod']:
- unit = int(name == 'prod')
+ all_na = frame * np.NaN
+ r0 = getattr(all_na, opname)(axis=0)
+ r1 = getattr(all_na, opname)(axis=1)
+ if opname in ['sum', 'prod']:
+ unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
-def _check_bool_op(name, alternative, frame, float_string_frame,
- has_skipna=True, has_bool_only=False):
+def assert_stat_op_api(opname, float_frame, float_string_frame,
+ has_numeric_only=False):
+ """
+ Check that API for operator opname works as advertised on frame
+
+ Parameters
+ ----------
+ opname : string
+ Name of the operator to test on frame
+ float_frame : DataFrame
+ DataFrame with columns of type float
+ float_string_frame : DataFrame
+ DataFrame with both float and string columns
+ has_numeric_only : bool, default False
+ Whether the method "opname" has the kwarg "numeric_only"
+ """
+
+ # make sure works on mixed-type frame
+ getattr(float_string_frame, opname)(axis=0)
+ getattr(float_string_frame, opname)(axis=1)
- f = getattr(frame, name)
+ if has_numeric_only:
+ getattr(float_string_frame, opname)(axis=0, numeric_only=True)
+ getattr(float_string_frame, opname)(axis=1, numeric_only=True)
+ getattr(float_frame, opname)(axis=0, numeric_only=False)
+ getattr(float_frame, opname)(axis=1, numeric_only=False)
+
+
+def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
+ """
+ Check that bool operator opname works as advertised on frame
+
+ Parameters
+ ----------
+ opname : string
+ Name of the operator to test on frame
+ alternative : function
+ Function that opname is tested against; i.e. "frame.opname()" should
+ equal "alternative(frame)".
+ frame : DataFrame
+ The object that the tests are executed on
+ has_skipna : bool, default True
+ Whether the method "opname" has the kwarg "skip_na"
+ """
+
+ f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
@@ -118,6 +174,7 @@ def wrapper(x):
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
+
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
@@ -127,18 +184,48 @@ def wrapper(x):
result0 = f(axis=0)
result1 = f(axis=1)
+
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
- pytest.raises(ValueError, f, axis=2)
+ tm.assert_raises_regex(ValueError, 'No axis named 2', f, axis=2)
- # make sure works on mixed-type frame
+ # all NA case
+ if has_skipna:
+ all_na = frame * np.NaN
+ r0 = getattr(all_na, opname)(axis=0)
+ r1 = getattr(all_na, opname)(axis=1)
+ if opname == 'any':
+ assert not r0.any()
+ assert not r1.any()
+ else:
+ assert r0.all()
+ assert r1.all()
+
+
+def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
+ has_bool_only=False):
+ """
+ Check that API for boolean operator opname works as advertised on frame
+
+ Parameters
+ ----------
+ opname : string
+ Name of the operator to test on frame
+ float_frame : DataFrame
+ DataFrame with columns of type float
+ float_string_frame : DataFrame
+ DataFrame with both float and string columns
+ has_bool_only : bool, default False
+ Whether the method "opname" has the kwarg "bool_only"
+ """
+ # make sure op works on mixed-type frame
mixed = float_string_frame
- mixed['_bool_'] = np.random.randn(len(mixed)) > 0
- getattr(mixed, name)(axis=0)
- getattr(mixed, name)(axis=1)
+ mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
+ getattr(mixed, opname)(axis=0)
+ getattr(mixed, opname)(axis=1)
class NonzeroFail(object):
@@ -148,22 +235,10 @@ def __nonzero__(self):
mixed['_nonzero_fail_'] = NonzeroFail()
if has_bool_only:
- getattr(mixed, name)(axis=0, bool_only=True)
- getattr(mixed, name)(axis=1, bool_only=True)
- getattr(frame, name)(axis=0, bool_only=False)
- getattr(frame, name)(axis=1, bool_only=False)
-
- # all NA case
- if has_skipna:
- all_na = frame * np.NaN
- r0 = getattr(all_na, name)(axis=0)
- r1 = getattr(all_na, name)(axis=1)
- if name == 'any':
- assert not r0.any()
- assert not r1.any()
- else:
- assert r0.all()
- assert r1.all()
+ getattr(mixed, opname)(axis=0, bool_only=True)
+ getattr(mixed, opname)(axis=1, bool_only=True)
+ getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
+ getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics():
@@ -596,10 +671,10 @@ def test_reduce_mixed_frame(self):
def test_count(self, float_frame_with_na, float_frame, float_string_frame):
f = lambda s: notna(s).sum()
- _check_stat_op('count', f, float_frame_with_na, float_frame,
- float_string_frame, has_skipna=False,
- has_numeric_only=True, check_dtype=False,
- check_dates=True)
+ assert_stat_op_calc('count', f, float_frame_with_na, has_skipna=False,
+ check_dtype=False, check_dates=True)
+ assert_stat_op_api('count', float_frame, float_string_frame,
+ has_numeric_only=True)
# corner case
frame = DataFrame()
@@ -628,9 +703,10 @@ def test_count(self, float_frame_with_na, float_frame, float_string_frame):
def test_nunique(self, float_frame_with_na, float_frame,
float_string_frame):
f = lambda s: len(algorithms.unique1d(s.dropna()))
- _check_stat_op('nunique', f, float_frame_with_na,
- float_frame, float_string_frame, has_skipna=False,
- check_dtype=False, check_dates=True)
+ assert_stat_op_calc('nunique', f, float_frame_with_na,
+ has_skipna=False, check_dtype=False,
+ check_dates=True)
+ assert_stat_op_api('nunique', float_frame, float_string_frame)
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
@@ -644,15 +720,13 @@ def test_nunique(self, float_frame_with_na, float_frame,
def test_sum(self, float_frame_with_na, mixed_float_frame,
float_frame, float_string_frame):
- _check_stat_op('sum', np.sum, float_frame_with_na, float_frame,
- float_string_frame, has_numeric_only=True,
- skipna_alternative=np.nansum)
-
+ assert_stat_op_api('sum', float_frame, float_string_frame,
+ has_numeric_only=True)
+ assert_stat_op_calc('sum', np.sum, float_frame_with_na,
+ skipna_alternative=np.nansum)
# mixed types (with upcasting happening)
- _check_stat_op('sum', np.sum,
- mixed_float_frame.astype('float32'), float_frame,
- float_string_frame, has_numeric_only=True,
- check_dtype=False, check_less_precise=True)
+ assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
+ check_dtype=False, check_less_precise=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
@@ -679,13 +753,14 @@ def test_stat_operators_attempt_obj_array(self, method):
tm.assert_series_equal(result, expected)
def test_mean(self, float_frame_with_na, float_frame, float_string_frame):
- _check_stat_op('mean', np.mean, float_frame_with_na,
- float_frame, float_string_frame, check_dates=True)
+ assert_stat_op_calc('mean', np.mean, float_frame_with_na,
+ check_dates=True)
+ assert_stat_op_api('mean', float_frame, float_string_frame)
def test_product(self, float_frame_with_na, float_frame,
float_string_frame):
- _check_stat_op('product', np.prod, float_frame_with_na,
- float_frame, float_string_frame)
+ assert_stat_op_calc('product', np.prod, float_frame_with_na)
+ assert_stat_op_api('product', float_frame, float_string_frame)
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
@@ -696,18 +771,18 @@ def wrapper(x):
return np.nan
return np.median(x)
- _check_stat_op('median', wrapper, float_frame_with_na,
- float_frame, float_string_frame, check_dates=True)
+ assert_stat_op_calc('median', wrapper, float_frame_with_na,
+ check_dates=True)
+ assert_stat_op_api('median', float_frame, float_string_frame)
def test_min(self, float_frame_with_na, int_frame,
float_frame, float_string_frame):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
- _check_stat_op('min', np.min, float_frame_with_na,
- float_frame, float_string_frame,
- check_dates=True)
- _check_stat_op('min', np.min, int_frame, float_frame,
- float_string_frame)
+ assert_stat_op_calc('min', np.min, float_frame_with_na,
+ check_dates=True)
+ assert_stat_op_calc('min', np.min, int_frame)
+ assert_stat_op_api('min', float_frame, float_string_frame)
def test_cummin(self, datetime_frame):
datetime_frame.loc[5:10, 0] = nan
@@ -759,26 +834,25 @@ def test_max(self, float_frame_with_na, int_frame,
float_frame, float_string_frame):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
- _check_stat_op('max', np.max, float_frame_with_na,
- float_frame, float_string_frame,
- check_dates=True)
- _check_stat_op('max', np.max, int_frame, float_frame,
- float_string_frame)
+ assert_stat_op_calc('max', np.max, float_frame_with_na,
+ check_dates=True)
+ assert_stat_op_calc('max', np.max, int_frame)
+ assert_stat_op_api('max', float_frame, float_string_frame)
def test_mad(self, float_frame_with_na, float_frame, float_string_frame):
f = lambda x: np.abs(x - x.mean()).mean()
- _check_stat_op('mad', f, float_frame_with_na, float_frame,
- float_string_frame)
+ assert_stat_op_calc('mad', f, float_frame_with_na)
+ assert_stat_op_api('mad', float_frame, float_string_frame)
def test_var_std(self, float_frame_with_na, datetime_frame, float_frame,
float_string_frame):
alt = lambda x: np.var(x, ddof=1)
- _check_stat_op('var', alt, float_frame_with_na, float_frame,
- float_string_frame)
+ assert_stat_op_calc('var', alt, float_frame_with_na)
+ assert_stat_op_api('var', float_frame, float_string_frame)
alt = lambda x: np.std(x, ddof=1)
- _check_stat_op('std', alt, float_frame_with_na, float_frame,
- float_string_frame)
+ assert_stat_op_calc('std', alt, float_frame_with_na)
+ assert_stat_op_api('std', float_frame, float_string_frame)
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
@@ -892,8 +966,8 @@ def test_cumprod(self, datetime_frame):
def test_sem(self, float_frame_with_na, datetime_frame,
float_frame, float_string_frame):
alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
- _check_stat_op('sem', alt, float_frame_with_na,
- float_frame, float_string_frame)
+ assert_stat_op_calc('sem', alt, float_frame_with_na)
+ assert_stat_op_api('sem', float_frame, float_string_frame)
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
@@ -917,8 +991,8 @@ def alt(x):
return np.nan
return skew(x, bias=False)
- _check_stat_op('skew', alt, float_frame_with_na,
- float_frame, float_string_frame)
+ assert_stat_op_calc('skew', alt, float_frame_with_na)
+ assert_stat_op_api('skew', float_frame, float_string_frame)
@td.skip_if_no_scipy
def test_kurt(self, float_frame_with_na, float_frame, float_string_frame):
@@ -929,8 +1003,8 @@ def alt(x):
return np.nan
return kurtosis(x, bias=False)
- _check_stat_op('kurt', alt, float_frame_with_na,
- float_frame, float_string_frame)
+ assert_stat_op_calc('kurt', alt, float_frame_with_na)
+ assert_stat_op_api('kurt', float_frame, float_string_frame)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
@@ -1205,9 +1279,9 @@ def wrapper(x):
return np.nan
return np.median(x)
- _check_stat_op('median', wrapper, int_frame, float_frame,
- float_string_frame, check_dtype=False,
- check_dates=True)
+ assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
+ check_dates=True)
+ assert_stat_op_api('median', float_frame, float_string_frame)
# Miscellanea
@@ -1262,13 +1336,12 @@ def test_idxmax(self, float_frame, int_frame):
# ----------------------------------------------------------------------
# Logical reductions
- def test_any_all(self, bool_frame_with_na, float_string_frame):
- _check_bool_op('any', np.any, bool_frame_with_na,
- float_string_frame, has_skipna=True,
- has_bool_only=True)
- _check_bool_op('all', np.all, bool_frame_with_na,
- float_string_frame, has_skipna=True,
- has_bool_only=True)
+ @pytest.mark.parametrize('opname', ['any', 'all'])
+ def test_any_all(self, opname, bool_frame_with_na, float_string_frame):
+ assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na,
+ has_skipna=True)
+ assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
+ has_bool_only=True)
def test_any_all_extra(self):
df = DataFrame({
| Follow-up to #22733, where some things were left undone to ease reviewing.
Mainly:
* cleaning up the jumble of `_check_stat_op`, splitting it into two functions with separate purposes
* this removes redundant calls to the API-portion of the function in tests like `test_max` etc.
* renaming according to review in #22733
* same for `_check_bool_op`
* parametrize `test_any_all` according to review in #22733
| https://api.github.com/repos/pandas-dev/pandas/pulls/23016 | 2018-10-06T16:18:40Z | 2018-10-07T21:47:57Z | 2018-10-07T21:47:57Z | 2018-10-08T17:08:54Z |
DOC: improve documentation of DateOffset (closes #22929) | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 85b0abe421eb2..575d00cfcbc61 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -157,7 +157,7 @@ For example:
pd.Period('2012-05', freq='D')
-:class:`Timestamp` and :class:`Period` can serve as an index. Lists of
+:class:`Timestamp` and :class:`Period` can serve as an index. Lists of
``Timestamp`` and ``Period`` are automatically coerced to :class:`DatetimeIndex`
and :class:`PeriodIndex` respectively.
@@ -220,7 +220,7 @@ you can pass the ``dayfirst`` flag:
can't be parsed with the day being first it will be parsed as if
``dayfirst`` were False.
-If you pass a single string to ``to_datetime``, it returns a single ``Timestamp``.
+If you pass a single string to ``to_datetime``, it returns a single ``Timestamp``.
``Timestamp`` can also accept string input, but it doesn't accept string parsing
options like ``dayfirst`` or ``format``, so use ``to_datetime`` if these are required.
@@ -255,7 +255,7 @@ This could also potentially speed up the conversion considerably.
pd.to_datetime('12-11-2010 00:00', format='%d-%m-%Y %H:%M')
-For more information on the choices available when specifying the ``format``
+For more information on the choices available when specifying the ``format``
option, see the Python `datetime documentation`_.
.. _datetime documentation: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
@@ -473,7 +473,7 @@ Custom Frequency Ranges
This functionality was originally exclusive to ``cdate_range``, which is
deprecated as of version 0.21.0 in favor of ``bdate_range``. Note that
``cdate_range`` only utilizes the ``weekmask`` and ``holidays`` parameters
- when custom business day, 'C', is passed as the frequency string. Support has
+ when custom business day, 'C', is passed as the frequency string. Support has
been expanded with ``bdate_range`` to work with any custom frequency string.
.. versionadded:: 0.21.0
@@ -589,7 +589,7 @@ would include matching times on an included date:
dft
dft['2013']
-This starts on the very first time in the month, and includes the last date and
+This starts on the very first time in the month, and includes the last date and
time for the month:
.. ipython:: python
@@ -664,7 +664,7 @@ A timestamp string with minute resolution (or more accurate), gives a scalar ins
series_minute['2011-12-31 23:59']
series_minute['2011-12-31 23:59:00']
-If index resolution is second, then the minute-accurate timestamp gives a
+If index resolution is second, then the minute-accurate timestamp gives a
``Series``.
.. ipython:: python
@@ -728,9 +728,9 @@ With no defaults.
Truncating & Fancy Indexing
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-A :meth:`~DataFrame.truncate` convenience function is provided that is similar
-to slicing. Note that ``truncate`` assumes a 0 value for any unspecified date
-component in a ``DatetimeIndex`` in contrast to slicing which returns any
+A :meth:`~DataFrame.truncate` convenience function is provided that is similar
+to slicing. Note that ``truncate`` assumes a 0 value for any unspecified date
+component in a ``DatetimeIndex`` in contrast to slicing which returns any
partially matching dates:
.. ipython:: python
@@ -814,7 +814,7 @@ There are several time/date properties that one can access from ``Timestamp`` or
is_year_end,"Logical indicating if last day of year (defined by frequency)"
is_leap_year,"Logical indicating if the date belongs to a leap year"
-Furthermore, if you have a ``Series`` with datetimelike values, then you can
+Furthermore, if you have a ``Series`` with datetimelike values, then you can
access these properties via the ``.dt`` accessor, as detailed in the section
on :ref:`.dt accessors<basics.dt_accessors>`.
@@ -889,9 +889,29 @@ The key features of a ``DateOffset`` object are:
* It can be multiplied by an integer (positive or negative) so that the
increment will be applied multiple times.
* It has :meth:`~pandas.DateOffset.rollforward` and
- :meth:`~pandas.DateOffset.rollback` methods for moving a date forward or
+ :meth:`~pandas.DateOffset.rollback` methods for moving a date forward or
backward to the next or previous "offset date".
+.. note::
+
+ Beware that adding an offset to a ``DatetimeIndexes`` with
+ consecutive values can produce a ``DatetimeIndexes`` with gaps, for example:
+
+ .. ipython:: python
+
+ index = pd.DatetimeIndex(start='2018-02-27', periods=3, freq='D')
+ index
+ index + pd.tseries.offsets.DateOffset(months=4, days=2)
+
+ A way to achieve ``DatetimeIndexes`` without gaps is to shift the first date
+ with ``DateOffset`` then construct the ``DatetimeIndexes`` using the original
+ frequency:
+
+ .. ipython:: python
+ start_date = index[0] + pd.tseries.offsets.DateOffset(months=4, days=2)
+ new_index = pd.DatetimeIndex(start=start_date, periods=3, freq='D')
+ new_index
+
Subclasses of ``DateOffset`` define the ``apply`` function which dictates
custom date increment logic, such as adding business days:
@@ -919,7 +939,7 @@ The ``rollforward`` and ``rollback`` methods do exactly what you would expect:
It's definitely worth exploring the ``pandas.tseries.offsets`` module and the
various docstrings for the classes.
-These operations (``apply``, ``rollforward`` and ``rollback``) preserve time
+These operations (``apply``, ``rollforward`` and ``rollback``) preserve time
(hour, minute, etc) information by default. To reset time, use ``normalize``
before or after applying the operation (depending on whether you want the
time information included in the operation.
@@ -1111,8 +1131,8 @@ allowing to use specific start and end times.
By default, ``BusinessHour`` uses 9:00 - 17:00 as business hours.
Adding ``BusinessHour`` will increment ``Timestamp`` by hourly frequency.
-If target ``Timestamp`` is out of business hours, move to the next business hour
-then increment it. If the result exceeds the business hours end, the remaining
+If target ``Timestamp`` is out of business hours, move to the next business hour
+then increment it. If the result exceeds the business hours end, the remaining
hours are added to the next business day.
.. ipython:: python
@@ -1139,9 +1159,9 @@ hours are added to the next business day.
# Subtracting 3 business hours
pd.Timestamp('2014-08-01 10:00') + BusinessHour(-3)
-You can also specify ``start`` and ``end`` time by keywords. The argument must
-be a ``str`` with an ``hour:minute`` representation or a ``datetime.time``
-instance. Specifying seconds, microseconds and nanoseconds as business hour
+You can also specify ``start`` and ``end`` time by keywords. The argument must
+be a ``str`` with an ``hour:minute`` representation or a ``datetime.time``
+instance. Specifying seconds, microseconds and nanoseconds as business hour
results in ``ValueError``.
.. ipython:: python
@@ -1198,8 +1218,8 @@ under the default business hours (9:00 - 17:00), there is no gap (0 minutes) bet
# The result is the same as rollworward because BusinessDay never overlap.
BusinessHour().apply(pd.Timestamp('2014-08-02'))
-``BusinessHour`` regards Saturday and Sunday as holidays. To use arbitrary
-holidays, you can use ``CustomBusinessHour`` offset, as explained in the
+``BusinessHour`` regards Saturday and Sunday as holidays. To use arbitrary
+holidays, you can use ``CustomBusinessHour`` offset, as explained in the
following subsection.
.. _timeseries.custombusinesshour:
@@ -1490,7 +1510,7 @@ Shifting / Lagging
~~~~~~~~~~~~~~~~~~
One may want to *shift* or *lag* the values in a time series back and forward in
-time. The method for this is :meth:`~Series.shift`, which is available on all of
+time. The method for this is :meth:`~Series.shift`, which is available on all of
the pandas objects.
.. ipython:: python
@@ -1500,7 +1520,7 @@ the pandas objects.
ts.shift(1)
The ``shift`` method accepts an ``freq`` argument which can accept a
-``DateOffset`` class or other ``timedelta``-like object or also an
+``DateOffset`` class or other ``timedelta``-like object or also an
:ref:`offset alias <timeseries.offset_aliases>`:
.. ipython:: python
@@ -1509,7 +1529,7 @@ The ``shift`` method accepts an ``freq`` argument which can accept a
ts.shift(5, freq='BM')
Rather than changing the alignment of the data and the index, ``DataFrame`` and
-``Series`` objects also have a :meth:`~Series.tshift` convenience method that
+``Series`` objects also have a :meth:`~Series.tshift` convenience method that
changes all the dates in the index by a specified number of offsets:
.. ipython:: python
@@ -1522,9 +1542,9 @@ is not being realigned.
Frequency Conversion
~~~~~~~~~~~~~~~~~~~~
-The primary function for changing frequencies is the :meth:`~Series.asfreq`
-method. For a ``DatetimeIndex``, this is basically just a thin, but convenient
-wrapper around :meth:`~Series.reindex` which generates a ``date_range`` and
+The primary function for changing frequencies is the :meth:`~Series.asfreq`
+method. For a ``DatetimeIndex``, this is basically just a thin, but convenient
+wrapper around :meth:`~Series.reindex` which generates a ``date_range`` and
calls ``reindex``.
.. ipython:: python
@@ -1544,13 +1564,13 @@ method for any gaps that may appear after the frequency conversion.
Filling Forward / Backward
~~~~~~~~~~~~~~~~~~~~~~~~~~
-Related to ``asfreq`` and ``reindex`` is :meth:`~Series.fillna`, which is
+Related to ``asfreq`` and ``reindex`` is :meth:`~Series.fillna`, which is
documented in the :ref:`missing data section <missing_data.fillna>`.
Converting to Python Datetimes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-``DatetimeIndex`` can be converted to an array of Python native
+``DatetimeIndex`` can be converted to an array of Python native
:py:class:`datetime.datetime` objects using the ``to_pydatetime`` method.
.. _timeseries.resampling:
@@ -1563,13 +1583,13 @@ Resampling
The interface to ``.resample`` has changed in 0.18.0 to be more groupby-like and hence more flexible.
See the :ref:`whatsnew docs <whatsnew_0180.breaking.resample>` for a comparison with prior versions.
-Pandas has a simple, powerful, and efficient functionality for performing
-resampling operations during frequency conversion (e.g., converting secondly
-data into 5-minutely data). This is extremely common in, but not limited to,
+Pandas has a simple, powerful, and efficient functionality for performing
+resampling operations during frequency conversion (e.g., converting secondly
+data into 5-minutely data). This is extremely common in, but not limited to,
financial applications.
-:meth:`~Series.resample` is a time-based groupby, followed by a reduction method
-on each of its groups. See some :ref:`cookbook examples <cookbook.resample>` for
+:meth:`~Series.resample` is a time-based groupby, followed by a reduction method
+on each of its groups. See some :ref:`cookbook examples <cookbook.resample>` for
some advanced strategies.
Starting in version 0.18.1, the ``resample()`` function can be used directly from
@@ -1577,7 +1597,7 @@ Starting in version 0.18.1, the ``resample()`` function can be used directly fro
.. note::
- ``.resample()`` is similar to using a :meth:`~Series.rolling` operation with
+ ``.resample()`` is similar to using a :meth:`~Series.rolling` operation with
a time-based offset, see a discussion :ref:`here <stats.moments.ts-versus-resampling>`.
Basics
@@ -1632,8 +1652,8 @@ labels.
.. note::
- The default values for ``label`` and ``closed`` is 'left' for all
- frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W'
+ The default values for ``label`` and ``closed`` is 'left' for all
+ frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W'
which all have a default of 'right'.
.. ipython:: python
@@ -1680,9 +1700,9 @@ Sparse Resampling
~~~~~~~~~~~~~~~~~
Sparse timeseries are the ones where you have a lot fewer points relative
-to the amount of time you are looking to resample. Naively upsampling a sparse
-series can potentially generate lots of intermediate values. When you don't want
-to use a method to fill these values, e.g. ``fill_method`` is ``None``, then
+to the amount of time you are looking to resample. Naively upsampling a sparse
+series can potentially generate lots of intermediate values. When you don't want
+to use a method to fill these values, e.g. ``fill_method`` is ``None``, then
intermediate values will be filled with ``NaN``.
Since ``resample`` is a time-based groupby, the following is a method to efficiently
| - [x] closes #22929
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/23014 | 2018-10-06T13:22:04Z | 2018-12-09T20:13:33Z | null | 2018-12-09T20:13:33Z |
BUG: millis res on parsed DatetimeIndex slice | diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index a6cdaa0c2163a..3a6b7b84fa753 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -1079,6 +1079,13 @@ def _parsed_string_to_bounds(self, reso, parsed):
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Second(),
tz=self.tz).value - 1))
+ elif reso == 'millisecond':
+ st = datetime(parsed.year, parsed.month, parsed.day,
+ parsed.hour, parsed.minute, parsed.second,
+ parsed.microsecond)
+ return (Timestamp(st, tz=self.tz),
+ Timestamp(Timestamp(st + offsets.Milli(),
+ tz=self.tz).value - 1))
elif reso == 'microsecond':
st = datetime(parsed.year, parsed.month, parsed.day,
parsed.hour, parsed.minute, parsed.second,
| Added support for millis resolution on a parsed DatetimeIndex slice.
There was no elif statement for when _parsed_string_to_bounds(self,
reso, parsed) was given reso == 'millisecond'
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/23010 | 2018-10-05T21:11:16Z | 2018-11-23T03:32:22Z | null | 2018-11-23T03:32:22Z |
fixes the issue #22953 | diff --git a/pandas/core/series.py b/pandas/core/series.py
index a613b22ea9046..16a49ec970e80 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2310,7 +2310,10 @@ def combine(self, other, func, fill_value=None):
lv = self.get(idx, fill_value)
rv = other.get(idx, fill_value)
with np.errstate(all='ignore'):
- new_values.append(func(lv, rv))
+ if(np.isnan(func(lv, rv))):
+ new_values.append(fill_value)
+ else:
+ new_values.append(func(lv, rv))
else:
# Assume that other is a scalar, so apply the function for
# each element in the Series
| - [x] closes #22953
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/23005 | 2018-10-05T07:03:12Z | 2018-10-05T15:23:48Z | null | 2018-10-05T18:46:44Z |
CLN GH22875 Replace bare excepts by explicit excepts in pandas/io/ | diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py
index 0d564069c681f..70c978a3b62ed 100644
--- a/pandas/io/clipboards.py
+++ b/pandas/io/clipboards.py
@@ -42,7 +42,7 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover
text, encoding=(kwargs.get('encoding') or
get_option('display.encoding'))
)
- except:
+ except AttributeError:
pass
# Excel copies into clipboard with \t separation
diff --git a/pandas/io/formats/console.py b/pandas/io/formats/console.py
index b8b28a0b0c98c..e347f6bce0168 100644
--- a/pandas/io/formats/console.py
+++ b/pandas/io/formats/console.py
@@ -100,7 +100,7 @@ def check_main():
try:
return __IPYTHON__ or check_main() # noqa
- except:
+ except NameError:
return check_main()
@@ -118,7 +118,7 @@ def in_qtconsole():
ip.config.get('IPKernelApp', {}).get('parent_appname', ""))
if 'qtconsole' in front_end.lower():
return True
- except:
+ except NameError:
return False
return False
@@ -137,7 +137,7 @@ def in_ipnb():
ip.config.get('IPKernelApp', {}).get('parent_appname', ""))
if 'notebook' in front_end.lower():
return True
- except:
+ except NameError:
return False
return False
@@ -149,7 +149,7 @@ def in_ipython_frontend():
try:
ip = get_ipython() # noqa
return 'zmq' in str(type(ip)).lower()
- except:
+ except NameError:
pass
return False
diff --git a/pandas/io/formats/terminal.py b/pandas/io/formats/terminal.py
index dcd6f2cf4a718..2846525adbe6b 100644
--- a/pandas/io/formats/terminal.py
+++ b/pandas/io/formats/terminal.py
@@ -78,7 +78,7 @@ def _get_terminal_size_windows():
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
- except:
+ except (AttributeError, ValueError):
return None
if res:
import struct
@@ -108,7 +108,7 @@ def _get_terminal_size_tput():
output = proc.communicate(input=None)
rows = int(output[0])
return (cols, rows)
- except:
+ except OSError:
return None
@@ -120,7 +120,7 @@ def ioctl_GWINSZ(fd):
import struct
cr = struct.unpack(
'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
- except:
+ except (struct.error, IOError):
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
@@ -129,13 +129,13 @@ def ioctl_GWINSZ(fd):
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
- except:
+ except OSError:
pass
if not cr or cr == (0, 0):
try:
from os import environ as env
cr = (env['LINES'], env['COLUMNS'])
- except:
+ except (ValueError, KeyError):
return None
return int(cr[1]), int(cr[0])
diff --git a/pandas/io/packers.py b/pandas/io/packers.py
index 7a1e72637f4ce..77fbb27d01f86 100644
--- a/pandas/io/packers.py
+++ b/pandas/io/packers.py
@@ -194,7 +194,7 @@ def read(fh):
if should_close:
try:
path_or_buf.close()
- except: # noqa: flake8
+ except IOError:
pass
return l
@@ -703,7 +703,7 @@ def create_block(b):
dtype = dtype_for(obj[u'dtype'])
try:
return dtype(obj[u'data'])
- except:
+ except (ValueError, TypeError):
return dtype.type(obj[u'data'])
elif typ == u'np_complex':
return complex(obj[u'real'] + u'+' + obj[u'imag'] + u'j')
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index a4f1155117b12..2def3b81c9518 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -459,7 +459,7 @@ def _read(filepath_or_buffer, kwds):
if should_close:
try:
filepath_or_buffer.close()
- except: # noqa: flake8
+ except ValueError:
pass
return data
@@ -1808,7 +1808,7 @@ def close(self):
# close additional handles opened by C parser (for compression)
try:
self._reader.close()
- except:
+ except ValueError:
pass
def _set_noconvert_columns(self):
@@ -3034,7 +3034,7 @@ def converter(*date_cols):
errors='ignore',
infer_datetime_format=infer_datetime_format
)
- except:
+ except ValueError:
return tools.to_datetime(
parsing.try_parse_dates(strs, dayfirst=dayfirst))
else:
@@ -3263,7 +3263,7 @@ def _floatify_na_values(na_values):
v = float(v)
if not np.isnan(v):
result.add(v)
- except:
+ except (TypeError, ValueError, OverflowError):
pass
return result
@@ -3284,11 +3284,11 @@ def _stringify_na_values(na_values):
result.append(str(v))
result.append(v)
- except:
+ except (TypeError, ValueError, OverflowError):
pass
try:
result.append(int(x))
- except:
+ except (TypeError, ValueError, OverflowError):
pass
return set(result)
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
index 9c219d7fd6997..d52a571da0d61 100644
--- a/pandas/io/pickle.py
+++ b/pandas/io/pickle.py
@@ -163,18 +163,20 @@ def try_read(path, encoding=None):
# We want to silence any warnings about, e.g. moved modules.
warnings.simplefilter("ignore", Warning)
return read_wrapper(lambda f: pkl.load(f))
- except Exception:
+ except Exception: # noqa: E722
# reg/patched pickle
+ # compat not used in pandas/compat/pickle_compat.py::load
+ # TODO: remove except block OR modify pc.load to use compat
try:
return read_wrapper(
lambda f: pc.load(f, encoding=encoding, compat=False))
# compat pickle
- except:
+ except Exception: # noqa: E722
return read_wrapper(
lambda f: pc.load(f, encoding=encoding, compat=True))
try:
return try_read(path)
- except:
+ except Exception: # noqa: E722
if PY3:
return try_read(path, encoding='latin1')
raise
diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py
index 14e7ad9682db6..385396909a07b 100644
--- a/pandas/io/sas/sas_xport.py
+++ b/pandas/io/sas/sas_xport.py
@@ -246,7 +246,7 @@ def __init__(self, filepath_or_buffer, index=None, encoding='ISO-8859-1',
contents = filepath_or_buffer.read()
try:
contents = contents.encode(self._encoding)
- except:
+ except UnicodeEncodeError:
pass
self.filepath_or_buffer = compat.BytesIO(contents)
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index b8a0bf5733158..d72996a8e6157 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -46,7 +46,7 @@ def read_sas(filepath_or_buffer, format=None, index=None, encoding=None,
format = "sas7bdat"
else:
raise ValueError("unable to infer format of SAS file")
- except:
+ except ValueError:
pass
if format.lower() == 'xport':
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index a582d32741ae9..882fa0092b2cf 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -382,7 +382,7 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
try:
_is_table_name = pandas_sql.has_table(sql)
- except:
+ except (ImportError, AttributeError):
_is_table_name = False
if _is_table_name:
@@ -847,7 +847,7 @@ def _sqlalchemy_type(self, col):
try:
tz = col.tzinfo # noqa
return DateTime(timezone=True)
- except:
+ except AttributeError:
return DateTime
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
@@ -1360,7 +1360,7 @@ def run_transaction(self):
try:
yield cur
self.con.commit()
- except:
+ except Exception:
self.con.rollback()
raise
finally:
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index efd5f337fdf69..a321e315f5225 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -1252,12 +1252,12 @@ def _read_old_header(self, first_char):
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
- except:
+ except ValueError:
raise ValueError("cannot convert stata types [{0}]"
.format(','.join(str(x) for x in typlist)))
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
- except:
+ except ValueError:
raise ValueError("cannot convert stata dtypes [{0}]"
.format(','.join(str(x) for x in typlist)))
| - [X] closes #22875
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
This is a resubmit of a previous PR I submitted that was approved (#22916). I tried to close and reopen to get a rebuild on travis but I couldn't reopen it because I had deleted my old fork (after accidentally pushing to master). | https://api.github.com/repos/pandas-dev/pandas/pulls/23004 | 2018-10-05T04:49:25Z | 2018-10-10T07:33:29Z | 2018-10-10T07:33:29Z | 2018-10-10T07:33:53Z |
fix typos | diff --git a/README.md b/README.md
index f26b9598bb5d3..b4dedecb4c697 100644
--- a/README.md
+++ b/README.md
@@ -97,7 +97,7 @@ easy and intuitive. It aims to be the fundamental high-level building block for
doing practical, **real world** data analysis in Python. Additionally, it has
the broader goal of becoming **the most powerful and flexible open source data
analysis / manipulation tool available in any language**. It is already well on
-its way toward this goal.
+its way towards this goal.
## Main Features
Here are just a few of the things that pandas does well:
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/23003 | 2018-10-05T04:28:02Z | 2018-10-05T11:58:41Z | 2018-10-05T11:58:41Z | 2018-10-05T14:28:10Z |
CI: Fix yaml/yml inconsistency | diff --git a/ci/azure-macos-35.yml b/ci/azure-macos-35.yaml
similarity index 100%
rename from ci/azure-macos-35.yml
rename to ci/azure-macos-35.yaml
diff --git a/ci/azure/macos.yml b/ci/azure/macos.yml
index 5bf8d18d6cbb9..9bfaef04ea2fa 100644
--- a/ci/azure/macos.yml
+++ b/ci/azure/macos.yml
@@ -10,7 +10,7 @@ jobs:
maxParallel: 11
matrix:
py35_np_110:
- ENV_FILE: ci/azure-macos-35.yml
+ ENV_FILE: ci/azure-macos-35.yaml
CONDA_PY: "35"
CONDA_ENV: pandas
TEST_ARGS: "--skip-slow --skip-network"
diff --git a/ci/azure/windows-py27.yml b/ci/azure/windows-py27.yml
index 3e92c96263930..10251bc03b8dc 100644
--- a/ci/azure/windows-py27.yml
+++ b/ci/azure/windows-py27.yml
@@ -10,7 +10,7 @@ jobs:
maxParallel: 11
matrix:
py36_np14:
- ENV_FILE: ci/azure-windows-27.yml
+ ENV_FILE: ci/azure-windows-27.yaml
CONDA_PY: "27"
CONDA_ENV: pandas
diff --git a/ci/azure/windows.yml b/ci/azure/windows.yml
index 2ab8c6f320188..fe6c9c933d474 100644
--- a/ci/azure/windows.yml
+++ b/ci/azure/windows.yml
@@ -10,7 +10,7 @@ jobs:
maxParallel: 11
matrix:
py36_np14:
- ENV_FILE: ci/azure-windows-36.yml
+ ENV_FILE: ci/azure-windows-36.yaml
CONDA_PY: "36"
CONDA_ENV: pandas
| The ci/windows.yml and ci/windows-py27.yml files reference the environment files with ".yml". This PR moves the ".yaml" files to the indicated location.
CI config is hard. | https://api.github.com/repos/pandas-dev/pandas/pulls/23001 | 2018-10-05T00:44:14Z | 2018-10-08T20:09:22Z | 2018-10-08T20:09:22Z | 2018-10-09T00:53:16Z |
Make DataFrame arithmetic ops with 2D arrays behave like numpy analogues | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 91575c311b409..66f3d4bd1c4f3 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -488,6 +488,39 @@ Previous Behavior:
0 NaT
+.. _whatsnew_0240.api.dataframe_arithmetic_broadcasting:
+
+DataFrame Arithmetic Operations Broadcasting Changes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+:class:`DataFrame` arithmetic operations when operating with 2-dimensional
+``np.ndarray`` objects now broadcast in the same way as ``np.ndarray``s
+broadcast. (:issue:`23000`)
+
+Previous Behavior:
+
+.. code-block:: ipython
+
+ In [3]: arr = np.arange(6).reshape(3, 2)
+ In [4]: df = pd.DataFrame(arr)
+ In [5]: df + arr[[0], :] # 1 row, 2 columns
+ ...
+ ValueError: Unable to coerce to DataFrame, shape must be (3, 2): given (1, 2)
+ In [6]: df + arr[:, [1]] # 1 column, 3 rows
+ ...
+ ValueError: Unable to coerce to DataFrame, shape must be (3, 2): given (3, 1)
+
+*Current Behavior*:
+
+.. ipython:: python
+ arr = np.arange(6).reshape(3, 2)
+ df = pd.DataFrame(arr)
+ df
+
+.. ipython:: python
+ df + arr[[0], :] # 1 row, 2 columns
+ df + arr[:, [1]] # 1 column, 3 rows
+
+
.. _whatsnew_0240.api.extension:
ExtensionType Changes
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index dc99faaf68f51..20559bca9caed 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -1799,14 +1799,32 @@ def to_series(right):
right = to_series(right)
elif right.ndim == 2:
- if left.shape != right.shape:
+ if right.shape == left.shape:
+ right = left._constructor(right, index=left.index,
+ columns=left.columns)
+
+ elif right.shape[0] == left.shape[0] and right.shape[1] == 1:
+ # Broadcast across columns
+ try:
+ right = np.broadcast_to(right, left.shape)
+ except AttributeError:
+ # numpy < 1.10.0
+ right = np.tile(right, (1, left.shape[1]))
+
+ right = left._constructor(right,
+ index=left.index,
+ columns=left.columns)
+
+ elif right.shape[1] == left.shape[1] and right.shape[0] == 1:
+ # Broadcast along rows
+ right = to_series(right[0, :])
+
+ else:
raise ValueError("Unable to coerce to DataFrame, shape "
"must be {req_shape}: given {given_shape}"
.format(req_shape=left.shape,
given_shape=right.shape))
- right = left._constructor(right, index=left.index,
- columns=left.columns)
elif right.ndim > 2:
raise ValueError('Unable to coerce to Series/DataFrame, dim '
'must be <= 2: {dim}'.format(dim=right.shape))
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 2eb11c3a2e2f7..b97c5e4f7d7c2 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -99,6 +99,7 @@ def test_df_flex_cmp_constant_return_types_empty(self, opname):
# Arithmetic
class TestFrameFlexArithmetic(object):
+
def test_df_add_td64_columnwise(self):
# GH#22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range('2016-01-01', periods=10)
@@ -252,6 +253,99 @@ def test_arith_flex_zero_len_raises(self):
class TestFrameArithmetic(object):
+ def test_df_add_2d_array_rowlike_broadcasts(self):
+ # GH#23000
+ arr = np.arange(6).reshape(3, 2)
+ df = pd.DataFrame(arr, columns=[True, False], index=['A', 'B', 'C'])
+
+ rowlike = arr[[1], :] # shape --> (1, ncols)
+ assert rowlike.shape == (1, df.shape[1])
+
+ expected = pd.DataFrame([[2, 4],
+ [4, 6],
+ [6, 8]],
+ columns=df.columns, index=df.index,
+ # specify dtype explicitly to avoid failing
+ # on 32bit builds
+ dtype=arr.dtype)
+ result = df + rowlike
+ tm.assert_frame_equal(result, expected)
+ result = rowlike + df
+ tm.assert_frame_equal(result, expected)
+
+ def test_df_add_2d_array_collike_broadcasts(self):
+ # GH#23000
+ arr = np.arange(6).reshape(3, 2)
+ df = pd.DataFrame(arr, columns=[True, False], index=['A', 'B', 'C'])
+
+ collike = arr[:, [1]] # shape --> (nrows, 1)
+ assert collike.shape == (df.shape[0], 1)
+
+ expected = pd.DataFrame([[1, 2],
+ [5, 6],
+ [9, 10]],
+ columns=df.columns, index=df.index,
+ # specify dtype explicitly to avoid failing
+ # on 32bit builds
+ dtype=arr.dtype)
+ result = df + collike
+ tm.assert_frame_equal(result, expected)
+ result = collike + df
+ tm.assert_frame_equal(result, expected)
+
+ def test_df_arith_2d_array_rowlike_broadcasts(self,
+ all_arithmetic_operators):
+ # GH#23000
+ opname = all_arithmetic_operators
+
+ arr = np.arange(6).reshape(3, 2)
+ df = pd.DataFrame(arr, columns=[True, False], index=['A', 'B', 'C'])
+
+ rowlike = arr[[1], :] # shape --> (1, ncols)
+ assert rowlike.shape == (1, df.shape[1])
+
+ exvals = [getattr(df.loc['A'], opname)(rowlike.squeeze()),
+ getattr(df.loc['B'], opname)(rowlike.squeeze()),
+ getattr(df.loc['C'], opname)(rowlike.squeeze())]
+
+ expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
+
+ if opname in ['__rmod__', '__rfloordiv__']:
+ # exvals will have dtypes [f8, i8, i8] so expected will be
+ # all-f8, but the DataFrame operation will return mixed dtypes
+ # use exvals[-1].dtype instead of "i8" for compat with 32-bit
+ # systems/pythons
+ expected[False] = expected[False].astype(exvals[-1].dtype)
+
+ result = getattr(df, opname)(rowlike)
+ tm.assert_frame_equal(result, expected)
+
+ def test_df_arith_2d_array_collike_broadcasts(self,
+ all_arithmetic_operators):
+ # GH#23000
+ opname = all_arithmetic_operators
+
+ arr = np.arange(6).reshape(3, 2)
+ df = pd.DataFrame(arr, columns=[True, False], index=['A', 'B', 'C'])
+
+ collike = arr[:, [1]] # shape --> (nrows, 1)
+ assert collike.shape == (df.shape[0], 1)
+
+ exvals = {True: getattr(df[True], opname)(collike.squeeze()),
+ False: getattr(df[False], opname)(collike.squeeze())}
+
+ dtype = None
+ if opname in ['__rmod__', '__rfloordiv__']:
+ # Series ops may return mixed int/float dtypes in cases where
+ # DataFrame op will return all-float. So we upcast `expected`
+ dtype = np.common_type(*[x.values for x in exvals.values()])
+
+ expected = pd.DataFrame(exvals, columns=df.columns, index=df.index,
+ dtype=dtype)
+
+ result = getattr(df, opname)(collike)
+ tm.assert_frame_equal(result, expected)
+
def test_df_bool_mul_int(self):
# GH#22047, GH#22163 multiplication by 1 should result in int dtype,
# not object dtype
| - [x] closes #22686
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Change DataFrame arithmetic behavior when operating against 2D-ndarray such that `op(df, arr)` broadcasts like `op(df.values, arr)`.
Related: #22880. Note this does NOT change the behavior of DataFrame comparison operations, so that PR (more specifically, the tests) will have to be updated if this is merged.
@timlod can you confirm that this is what you ha din mind in #22686?
| https://api.github.com/repos/pandas-dev/pandas/pulls/23000 | 2018-10-04T22:25:17Z | 2018-10-07T22:38:27Z | 2018-10-07T22:38:27Z | 2018-10-08T00:13:33Z |
BUG: Concat multiple different ExtensionArray types | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index dde098be2e5ae..3d7e7686b2db6 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -602,6 +602,7 @@ update the ``ExtensionDtype._metadata`` tuple to match the signature of your
- :meth:`Series.astype` and :meth:`DataFrame.astype` now dispatch to :meth:`ExtensionArray.astype` (:issue:`21185:`).
- Slicing a single row of a ``DataFrame`` with multiple ExtensionArrays of the same type now preserves the dtype, rather than coercing to object (:issue:`22784`)
- Added :meth:`pandas.api.types.register_extension_dtype` to register an extension type with pandas (:issue:`22664`)
+- Bug when concatenating multiple ``Series`` with different extension dtypes not casting to object dtype (:issue:`22994`)
- Series backed by an ``ExtensionArray`` now work with :func:`util.hash_pandas_object` (:issue:`23066`)
- Updated the ``.type`` attribute for ``PeriodDtype``, ``DatetimeTZDtype``, and ``IntervalDtype`` to be instances of the dtype (``Period``, ``Timestamp``, and ``Interval`` respectively) (:issue:`22938`)
- :func:`ExtensionArray.isna` is allowed to return an ``ExtensionArray`` (:issue:`22325`).
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index ac824708245d2..2b1778e5bcb2e 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -560,11 +560,6 @@ def _concat_sparse(to_concat, axis=0, typs=None):
fill_values = [x.fill_value for x in to_concat
if isinstance(x, SparseArray)]
-
- if len(set(fill_values)) > 1:
- raise ValueError("Cannot concatenate SparseArrays with different "
- "fill values")
-
fill_value = fill_values[0]
# TODO: Fix join unit generation so we aren't passed this.
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 3667d7c5e39dc..2646dbd33815d 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1636,8 +1636,7 @@ def concat(self, to_concat, new_axis):
# check if all series are of the same block type:
if len(non_empties) > 0:
blocks = [obj.blocks[0] for obj in non_empties]
-
- if all(type(b) is type(blocks[0]) for b in blocks[1:]): # noqa
+ if len({b.dtype for b in blocks}) == 1:
new_block = blocks[0].concat_same_type(blocks)
else:
values = [x.values for x in blocks]
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index f84d24295b049..be1c61166e4b1 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -100,7 +100,9 @@ def test_hashable(self, dtype):
class TestInterface(BaseDecimal, base.BaseInterfaceTests):
- pass
+
+ pytestmark = pytest.mark.skipif(compat.PY2,
+ reason="Unhashble dtype in Py2.")
class TestConstructors(BaseDecimal, base.BaseConstructorsTests):
@@ -112,7 +114,8 @@ def test_from_dtype(self, data):
class TestReshaping(BaseDecimal, base.BaseReshapingTests):
- pass
+ pytestmark = pytest.mark.skipif(compat.PY2,
+ reason="Unhashble dtype in Py2.")
class TestGetitem(BaseDecimal, base.BaseGetitemTests):
@@ -174,7 +177,8 @@ class TestCasting(BaseDecimal, base.BaseCastingTests):
class TestGroupby(BaseDecimal, base.BaseGroupbyTests):
- pass
+ pytestmark = pytest.mark.skipif(compat.PY2,
+ reason="Unhashble dtype in Py2.")
class TestSetitem(BaseDecimal, base.BaseSetitemTests):
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index 2aaa04d571e69..d39c9fafe5749 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -1,6 +1,7 @@
from warnings import catch_warnings, simplefilter
from itertools import combinations
from collections import deque
+from decimal import Decimal
import datetime as dt
import dateutil
@@ -8,17 +9,17 @@
from numpy.random import randn
from datetime import datetime
-from pandas.compat import StringIO, iteritems, PY2
+from pandas.compat import Iterable, StringIO, iteritems, PY2
import pandas as pd
from pandas import (DataFrame, concat,
read_csv, isna, Series, date_range,
Index, Panel, MultiIndex, Timestamp,
DatetimeIndex, Categorical)
-from pandas.compat import Iterable
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.util import testing as tm
from pandas.util.testing import (assert_frame_equal,
makeCustomDataframe as mkdf)
+from pandas.tests.extension.decimal import to_decimal
import pytest
@@ -2361,6 +2362,18 @@ def test_concat_datetime_timezone(self):
index=idx1.append(idx1))
tm.assert_frame_equal(result, expected)
+ @pytest.mark.skipif(PY2, reason="Unhashable Decimal dtype")
+ def test_concat_different_extension_dtypes_upcasts(self):
+ a = pd.Series(pd.core.arrays.integer_array([1, 2]))
+ b = pd.Series(to_decimal([1, 2]))
+
+ result = pd.concat([a, b], ignore_index=True)
+ expected = pd.Series([
+ 1, 2,
+ Decimal(1), Decimal(2)
+ ], dtype=object)
+ tm.assert_series_equal(result, expected)
+
@pytest.mark.parametrize('pdt', [pd.Series, pd.DataFrame, pd.Panel])
@pytest.mark.parametrize('dt', np.sctypes['float'])
| xref https://github.com/pandas-dev/pandas/issues/22994
This builds on https://github.com/pandas-dev/pandas/pull/22996, since we need hashing.
2a1660c77e8ad0c28485c6d81227ac9d6b6de4f2 is the relevant commit.
Sparse tests will fail for now. I'll revisit once SparseArray is in. | https://api.github.com/repos/pandas-dev/pandas/pulls/22997 | 2018-10-04T21:16:21Z | 2018-10-18T12:01:21Z | 2018-10-18T12:01:21Z | 2018-10-18T12:01:25Z |
API: ExtensionDtype Equality and Hashability | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index c9874b4dd03d6..a1467cbca963a 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -492,6 +492,15 @@ Previous Behavior:
ExtensionType Changes
^^^^^^^^^^^^^^^^^^^^^
+**:class:`pandas.api.extensions.ExtensionDtype` Equality and Hashability**
+
+Pandas now requires that extension dtypes be hashable. The base class implements
+a default ``__eq__`` and ``__hash__``. If you have a parametrized dtype, you should
+update the ``ExtensionDtype._metadata`` tuple to match the signature of your
+``__init__`` method. See :class:`pandas.api.extensions.ExtensionDtype` for more (:issue:`22476`).
+
+**Other changes**
+
- ``ExtensionArray`` has gained the abstract methods ``.dropna()`` (:issue:`21185`)
- ``ExtensionDtype`` has gained the ability to instantiate from string dtypes, e.g. ``decimal`` would instantiate a registered ``DecimalDtype``; furthermore
the ``ExtensionDtype`` has gained the method ``construct_array_type`` (:issue:`21185`)
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index b0fa55e346613..ac4d6d1590f38 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -22,14 +22,17 @@ class _DtypeOpsMixin(object):
# of the NA value, not the physical NA vaalue for storage.
# e.g. for JSONArray, this is an empty dictionary.
na_value = np.nan
+ _metadata = ()
def __eq__(self, other):
"""Check whether 'other' is equal to self.
- By default, 'other' is considered equal if
+ By default, 'other' is considered equal if either
* it's a string matching 'self.name'.
- * it's an instance of this type.
+ * it's an instance of this type and all of the
+ the attributes in ``self._metadata`` are equal between
+ `self` and `other`.
Parameters
----------
@@ -40,11 +43,19 @@ def __eq__(self, other):
bool
"""
if isinstance(other, compat.string_types):
- return other == self.name
- elif isinstance(other, type(self)):
- return True
- else:
- return False
+ try:
+ other = self.construct_from_string(other)
+ except TypeError:
+ return False
+ if isinstance(other, type(self)):
+ return all(
+ getattr(self, attr) == getattr(other, attr)
+ for attr in self._metadata
+ )
+ return False
+
+ def __hash__(self):
+ return hash(tuple(getattr(self, attr) for attr in self._metadata))
def __ne__(self, other):
return not self.__eq__(other)
@@ -161,6 +172,26 @@ class ExtensionDtype(_DtypeOpsMixin):
The `na_value` class attribute can be used to set the default NA value
for this type. :attr:`numpy.nan` is used by default.
+ ExtensionDtypes are required to be hashable. The base class provides
+ a default implementation, which relies on the ``_metadata`` class
+ attribute. ``_metadata`` should be a tuple containing the strings
+ that define your data type. For example, with ``PeriodDtype`` that's
+ the ``freq`` attribute.
+
+ **If you have a parametrized dtype you should set the ``_metadata``
+ class property**.
+
+ Ideally, the attributes in ``_metadata`` will match the
+ parameters to your ``ExtensionDtype.__init__`` (if any). If any of
+ the attributes in ``_metadata`` don't implement the standard
+ ``__eq__`` or ``__hash__``, the default implementations here will not
+ work.
+
+ .. versionchanged:: 0.24.0
+
+ Added ``_metadata``, ``__hash__``, and changed the default definition
+ of ``__eq__``.
+
This class does not inherit from 'abc.ABCMeta' for performance reasons.
Methods and properties required by the interface raise
``pandas.errors.AbstractMethodError`` and no ``register`` method is
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index beda9bc02f4d5..611cae28877c3 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -101,7 +101,6 @@ class PandasExtensionDtype(_DtypeOpsMixin):
base = None
isbuiltin = 0
isnative = 0
- _metadata = []
_cache = {}
def __unicode__(self):
@@ -209,7 +208,7 @@ class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):
kind = 'O'
str = '|O08'
base = np.dtype('O')
- _metadata = ['categories', 'ordered']
+ _metadata = ('categories', 'ordered')
_cache = {}
def __init__(self, categories=None, ordered=None):
@@ -485,7 +484,7 @@ class DatetimeTZDtype(PandasExtensionDtype):
str = '|M8[ns]'
num = 101
base = np.dtype('M8[ns]')
- _metadata = ['unit', 'tz']
+ _metadata = ('unit', 'tz')
_match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]")
_cache = {}
@@ -589,7 +588,7 @@ class PeriodDtype(PandasExtensionDtype):
str = '|O08'
base = np.dtype('O')
num = 102
- _metadata = ['freq']
+ _metadata = ('freq',)
_match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]")
_cache = {}
@@ -709,7 +708,7 @@ class IntervalDtype(PandasExtensionDtype, ExtensionDtype):
str = '|O08'
base = np.dtype('O')
num = 103
- _metadata = ['subtype']
+ _metadata = ('subtype',)
_match = re.compile(r"(I|i)nterval\[(?P<subtype>.+)\]")
_cache = {}
diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py
index 8d1f1cadcc23f..d5cf9571e3622 100644
--- a/pandas/tests/extension/base/dtype.py
+++ b/pandas/tests/extension/base/dtype.py
@@ -49,6 +49,10 @@ def test_eq_with_str(self, dtype):
def test_eq_with_numpy_object(self, dtype):
assert dtype != np.dtype('object')
+ def test_eq_with_self(self, dtype):
+ assert dtype == dtype
+ assert dtype != object()
+
def test_array_type(self, data, dtype):
assert dtype.construct_array_type() is type(data)
@@ -81,3 +85,6 @@ def test_check_dtype(self, data):
index=list('ABCD'))
result = df.dtypes.apply(str) == str(dtype)
self.assert_series_equal(result, expected)
+
+ def test_hashable(self, dtype):
+ hash(dtype) # no error
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 79e1a692f744a..a1ee3a4fefef2 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -15,15 +15,11 @@ class DecimalDtype(ExtensionDtype):
type = decimal.Decimal
name = 'decimal'
na_value = decimal.Decimal('NaN')
+ _metadata = ('context',)
def __init__(self, context=None):
self.context = context or decimal.getcontext()
- def __eq__(self, other):
- if isinstance(other, type(self)):
- return self.context == other.context
- return super(DecimalDtype, self).__eq__(other)
-
def __repr__(self):
return 'DecimalDtype(context={})'.format(self.context)
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index dd625d6e1eb3c..d65b4dde832e4 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -3,6 +3,7 @@
import numpy as np
import pandas as pd
+from pandas import compat
import pandas.util.testing as tm
import pytest
@@ -93,7 +94,9 @@ def assert_frame_equal(self, left, right, *args, **kwargs):
class TestDtype(BaseDecimal, base.BaseDtypeTests):
- pass
+ @pytest.mark.skipif(compat.PY2, reason="Context not hashable.")
+ def test_hashable(self, dtype):
+ pass
class TestInterface(BaseDecimal, base.BaseInterfaceTests):
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 87876d84bef99..976511941042d 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -27,6 +27,7 @@
class JSONDtype(ExtensionDtype):
type = compat.Mapping
name = 'json'
+
try:
na_value = collections.UserDict()
except AttributeError:
| Implements a default `__eq__` and `__hash__` for ExtensionDtype. Adds a test ensure that they're defined.
Do people have thoughts on `_metadata`? I've tried to document everywhere the importance of it for parametrized extension dtypes. If you fail to set it correctly and use the default `__eq__` and `__hash__`, you'll end up with bizarre behavior like `Period('D') == Period("M")` being true, and having the same hash.
A more heavyweight alternative is to do some metaclass trickery to inspect `ExtensionDtype.__init__` for parameters and set `_metadata` based on that, but I tend to stay away from metaclasses unless they're absolutely necessary.
Closes https://github.com/pandas-dev/pandas/issues/22476 | https://api.github.com/repos/pandas-dev/pandas/pulls/22996 | 2018-10-04T20:15:26Z | 2018-10-08T21:21:08Z | 2018-10-08T21:21:08Z | 2018-10-08T21:21:12Z |
use tm.assert_equal instead of parametrizing assert funcs | diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 65e151feeba67..445f9a7e5e980 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -880,7 +880,7 @@ If your change involves checking that a warning is actually emitted, use
.. code-block:: python
- with tm.assert_prodcues_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning):
df.some_operation()
We prefer this to the ``pytest.warns`` context manager because ours checks that the warning's
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index 3b7d6a709230b..74703e2837c4a 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -180,9 +180,9 @@ def test_to_datetime_format_weeks(self, cache):
for s, format, dt in data:
assert to_datetime(s, format=format, cache=cache) == dt
- @pytest.mark.parametrize("box,const,assert_equal", [
- [True, pd.Index, 'assert_index_equal'],
- [False, np.array, 'assert_numpy_array_equal']])
+ @pytest.mark.parametrize("box,const", [
+ [True, pd.Index],
+ [False, np.array]])
@pytest.mark.parametrize("fmt,dates,expected_dates", [
['%Y-%m-%d %H:%M:%S %Z',
['2010-01-01 12:00:00 UTC'] * 2,
@@ -215,12 +215,11 @@ def test_to_datetime_format_weeks(self, cache):
pd.Timestamp('2010-01-01 12:00:00',
tzinfo=pytz.FixedOffset(0))]]])
def test_to_datetime_parse_tzname_or_tzoffset(self, box, const,
- assert_equal, fmt,
- dates, expected_dates):
+ fmt, dates, expected_dates):
# GH 13486
result = pd.to_datetime(dates, format=fmt, box=box)
expected = const(expected_dates)
- getattr(tm, assert_equal)(result, expected)
+ tm.assert_equal(result, expected)
with pytest.raises(ValueError):
pd.to_datetime(dates, format=fmt, box=box, utc=True)
@@ -1049,17 +1048,16 @@ def test_to_datetime_types(self, cache):
# assert result == expected
@pytest.mark.parametrize('cache', [True, False])
- @pytest.mark.parametrize('box, klass, assert_method', [
- [True, Index, 'assert_index_equal'],
- [False, np.array, 'assert_numpy_array_equal']
+ @pytest.mark.parametrize('box, klass', [
+ [True, Index],
+ [False, np.array]
])
- def test_to_datetime_unprocessable_input(self, cache, box, klass,
- assert_method):
+ def test_to_datetime_unprocessable_input(self, cache, box, klass):
# GH 4928
# GH 21864
result = to_datetime([1, '1'], errors='ignore', cache=cache, box=box)
expected = klass(np.array([1, '1'], dtype='O'))
- getattr(tm, assert_method)(result, expected)
+ tm.assert_equal(result, expected)
pytest.raises(TypeError, to_datetime, [1, '1'], errors='raise',
cache=cache, box=box)
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index a6b217a37bd0c..bc8582d9b7d29 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -312,19 +312,16 @@ def test_nat_arithmetic_index():
tm.assert_index_equal(NaT - tdi, tdi_nat)
-@pytest.mark.parametrize('box, assert_func', [
- (TimedeltaIndex, tm.assert_index_equal),
- (Series, tm.assert_series_equal)
-])
-def test_nat_arithmetic_td64_vector(box, assert_func):
+@pytest.mark.parametrize('box', [TimedeltaIndex, Series])
+def test_nat_arithmetic_td64_vector(box):
# GH#19124
vec = box(['1 day', '2 day'], dtype='timedelta64[ns]')
box_nat = box([NaT, NaT], dtype='timedelta64[ns]')
- assert_func(vec + NaT, box_nat)
- assert_func(NaT + vec, box_nat)
- assert_func(vec - NaT, box_nat)
- assert_func(NaT - vec, box_nat)
+ tm.assert_equal(vec + NaT, box_nat)
+ tm.assert_equal(NaT + vec, box_nat)
+ tm.assert_equal(vec - NaT, box_nat)
+ tm.assert_equal(NaT - vec, box_nat)
def test_nat_pinned_docstrings():
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index b8fabbf52159d..bda4d71d58e82 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -2516,10 +2516,8 @@ def test_onOffset(self, case):
dt, expected = case
assert_onOffset(SemiMonthEnd(), dt, expected)
- @pytest.mark.parametrize('klass,assert_func',
- [(Series, tm.assert_series_equal),
- (DatetimeIndex, tm.assert_index_equal)])
- def test_vectorized_offset_addition(self, klass, assert_func):
+ @pytest.mark.parametrize('klass', [Series, DatetimeIndex])
+ def test_vectorized_offset_addition(self, klass):
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
@@ -2527,8 +2525,8 @@ def test_vectorized_offset_addition(self, klass, assert_func):
result2 = SemiMonthEnd() + s
exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
Timestamp('2000-02-29', tz='US/Central')], name='a')
- assert_func(result, exp)
- assert_func(result2, exp)
+ tm.assert_equal(result, exp)
+ tm.assert_equal(result2, exp)
s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'),
Timestamp('2000-02-01', tz='US/Central')], name='a')
@@ -2536,8 +2534,8 @@ def test_vectorized_offset_addition(self, klass, assert_func):
result2 = SemiMonthEnd() + s
exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
- assert_func(result, exp)
- assert_func(result2, exp)
+ tm.assert_equal(result, exp)
+ tm.assert_equal(result2, exp)
class TestSemiMonthBegin(Base):
@@ -2692,18 +2690,16 @@ def test_onOffset(self, case):
dt, expected = case
assert_onOffset(SemiMonthBegin(), dt, expected)
- @pytest.mark.parametrize('klass,assert_func',
- [(Series, tm.assert_series_equal),
- (DatetimeIndex, tm.assert_index_equal)])
- def test_vectorized_offset_addition(self, klass, assert_func):
+ @pytest.mark.parametrize('klass', [Series, DatetimeIndex])
+ def test_vectorized_offset_addition(self, klass):
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + SemiMonthBegin()
result2 = SemiMonthBegin() + s
exp = klass([Timestamp('2000-02-01 00:15:00', tz='US/Central'),
Timestamp('2000-03-01', tz='US/Central')], name='a')
- assert_func(result, exp)
- assert_func(result2, exp)
+ tm.assert_equal(result, exp)
+ tm.assert_equal(result2, exp)
s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'),
Timestamp('2000-02-01', tz='US/Central')], name='a')
@@ -2711,8 +2707,8 @@ def test_vectorized_offset_addition(self, klass, assert_func):
result2 = SemiMonthBegin() + s
exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
- assert_func(result, exp)
- assert_func(result2, exp)
+ tm.assert_equal(result, exp)
+ tm.assert_equal(result2, exp)
def test_Easter():
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 3db251e89842d..4e01e0feb004c 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -1522,8 +1522,8 @@ def assert_equal(left, right, **kwargs):
Parameters
----------
- left : Index, Series, or DataFrame
- right : Index, Series, or DataFrame
+ left : Index, Series, DataFrame, ExtensionArray, or np.ndarray
+ right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
**kwargs
"""
if isinstance(left, pd.Index):
@@ -1532,6 +1532,10 @@ def assert_equal(left, right, **kwargs):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
+ elif isinstance(left, ExtensionArray):
+ assert_extension_array_equal(left, right, **kwargs)
+ elif isinstance(left, np.ndarray):
+ assert_numpy_array_equal(left, right, **kwargs)
else:
raise NotImplementedError(type(left))
| https://api.github.com/repos/pandas-dev/pandas/pulls/22995 | 2018-10-04T19:03:07Z | 2018-10-04T21:53:44Z | 2018-10-04T21:53:44Z | 2020-04-05T17:38:52Z | |
CircleCI -> Azure | diff --git a/.circleci/config.yml b/.circleci/config.yml
index e947f30d285cd..5b10036818901 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,39 +1,6 @@
version: 2
jobs:
- # --------------------------------------------------------------------------
- # 0. py27_compat
- # --------------------------------------------------------------------------
- py27_compat:
- docker:
- - image: continuumio/miniconda:latest
- # databases configuration
- - image: circleci/postgres:9.6.5-alpine-ram
- environment:
- POSTGRES_USER: postgres
- POSTGRES_DB: pandas_nosetest
- - image: circleci/mysql:8-ram
- environment:
- MYSQL_USER: "root"
- MYSQL_HOST: "localhost"
- MYSQL_ALLOW_EMPTY_PASSWORD: "true"
- MYSQL_DATABASE: "pandas_nosetest"
- environment:
- JOB: "2.7_COMPAT"
- ENV_FILE: "ci/circle-27-compat.yaml"
- LOCALE_OVERRIDE: "it_IT.UTF-8"
- MINICONDA_DIR: /home/ubuntu/miniconda3
- steps:
- - checkout
- - run:
- name: build
- command: |
- ./ci/install_circle.sh
- ./ci/show_circle.sh
- - run:
- name: test
- command: ./ci/run_circle.sh --skip-slow --skip-network
-
# --------------------------------------------------------------------------
# 1. py36_locale
# --------------------------------------------------------------------------
@@ -62,86 +29,14 @@ jobs:
- run:
name: build
command: |
- ./ci/install_circle.sh
- ./ci/show_circle.sh
+ ./ci/circle/install_circle.sh
+ ./ci/circle/show_circle.sh
- run:
name: test
- command: ./ci/run_circle.sh --skip-slow --skip-network
-
- # --------------------------------------------------------------------------
- # 2. py36_locale_slow
- # --------------------------------------------------------------------------
- py36_locale_slow:
- docker:
- - image: continuumio/miniconda:latest
- # databases configuration
- - image: circleci/postgres:9.6.5-alpine-ram
- environment:
- POSTGRES_USER: postgres
- POSTGRES_DB: pandas_nosetest
- - image: circleci/mysql:8-ram
- environment:
- MYSQL_USER: "root"
- MYSQL_HOST: "localhost"
- MYSQL_ALLOW_EMPTY_PASSWORD: "true"
- MYSQL_DATABASE: "pandas_nosetest"
-
- environment:
- JOB: "3.6_LOCALE_SLOW"
- ENV_FILE: "ci/circle-36-locale_slow.yaml"
- LOCALE_OVERRIDE: "zh_CN.UTF-8"
- MINICONDA_DIR: /home/ubuntu/miniconda3
- steps:
- - checkout
- - run:
- name: build
- command: |
- ./ci/install_circle.sh
- ./ci/show_circle.sh
- - run:
- name: test
- command: ./ci/run_circle.sh --only-slow --skip-network
-
- # --------------------------------------------------------------------------
- # 3. py35_ascii
- # --------------------------------------------------------------------------
- py35_ascii:
- docker:
- - image: continuumio/miniconda:latest
- # databases configuration
- - image: circleci/postgres:9.6.5-alpine-ram
- environment:
- POSTGRES_USER: postgres
- POSTGRES_DB: pandas_nosetest
- - image: circleci/mysql:8-ram
- environment:
- MYSQL_USER: "root"
- MYSQL_HOST: "localhost"
- MYSQL_ALLOW_EMPTY_PASSWORD: "true"
- MYSQL_DATABASE: "pandas_nosetest"
-
- environment:
- JOB: "3.5_ASCII"
- ENV_FILE: "ci/circle-35-ascii.yaml"
- LOCALE_OVERRIDE: "C"
- MINICONDA_DIR: /home/ubuntu/miniconda3
- steps:
- - checkout
- - run:
- name: build
- command: |
- ./ci/install_circle.sh
- ./ci/show_circle.sh
- - run:
- name: test
- command: ./ci/run_circle.sh --skip-slow --skip-network
-
+ command: ./ci/circle/run_circle.sh --skip-slow --skip-network
workflows:
version: 2
build_and_test:
jobs:
- - py27_compat
- py36_locale
- - py36_locale_slow
- - py35_ascii
diff --git a/.travis.yml b/.travis.yml
index e8f7f3465bfd5..8ac4d827b0820 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -116,10 +116,10 @@ after_success:
after_script:
- echo "after_script start"
- source activate pandas && pushd /tmp && python -c "import pandas; pandas.show_versions();" && popd
- - if [ -e /tmp/single.xml ]; then
- ci/print_skipped.py /tmp/single.xml;
+ - if [ -e test-data-single.xml ]; then
+ ci/print_skipped.py test-data-single.xml;
fi
- - if [ -e /tmp/multiple.xml ]; then
- ci/print_skipped.py /tmp/multiple.xml;
+ - if [ -e test-data-multiple.xml ]; then
+ ci/print_skipped.py test-data-multiple.xml;
fi
- echo "after_script done"
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 5d473bfc5a38c..373c22fdf8e62 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -7,10 +7,10 @@ jobs:
parameters:
name: macOS
vmImage: xcode9-macos10.13
-# - template: ci/azure/linux.yml
-# parameters:
-# name: Linux
-# vmImage: ubuntu-16.04
+- template: ci/azure/linux.yml
+ parameters:
+ name: Linux
+ vmImage: ubuntu-16.04
# Windows Python 2.7 needs VC 9.0 installed, and not sure
# how to make that a conditional task, so for now these are
diff --git a/ci/circle-27-compat.yaml b/ci/azure-27-compat.yaml
similarity index 100%
rename from ci/circle-27-compat.yaml
rename to ci/azure-27-compat.yaml
diff --git a/ci/circle-36-locale_slow.yaml b/ci/azure-36-locale_slow.yaml
similarity index 100%
rename from ci/circle-36-locale_slow.yaml
rename to ci/azure-36-locale_slow.yaml
diff --git a/ci/azure-37-locale.yaml b/ci/azure-37-locale.yaml
new file mode 100644
index 0000000000000..ef97b85406709
--- /dev/null
+++ b/ci/azure-37-locale.yaml
@@ -0,0 +1,35 @@
+name: pandas
+channels:
+ - defaults
+ - conda-forge
+dependencies:
+ - beautifulsoup4
+ - cython>=0.28.2
+ - html5lib
+ - ipython
+ - jinja2
+ - lxml
+ - matplotlib
+ - nomkl
+ - numexpr
+ - numpy
+ - openpyxl=2.5.5
+ - psycopg2
+ - pymysql
+ - pytables
+ - python-dateutil
+ - python=3.6*
+ - pytz
+ - s3fs
+ - scipy
+ - sqlalchemy
+ - xarray
+ - xlrd
+ - xlsxwriter
+ - xlwt
+ # universal
+ - pytest
+ - pytest-xdist
+ - moto
+ - pip:
+ - hypothesis>=3.58.0
diff --git a/ci/azure/linux.yml b/ci/azure/linux.yml
new file mode 100644
index 0000000000000..f34cba69a6195
--- /dev/null
+++ b/ci/azure/linux.yml
@@ -0,0 +1,56 @@
+parameters:
+ name: ''
+ vmImage: ''
+
+jobs:
+- job: ${{ parameters.name }}
+ pool:
+ vmImage: ${{ parameters.vmImage }}
+ strategy:
+ maxParallel: 11
+ matrix:
+ py27_np_19:
+ ENV_FILE: ci/azure-27-compat.yaml
+ CONDA_PY: "27"
+ CONDA_ENV: pandas
+ TEST_ARGS: "--skip-slow --skip-network"
+
+ py36_locale:
+ ENV_FILE: ci/azure-37-locale.yaml
+ CONDA_PY: "37"
+ CONDA_ENV: pandas
+ TEST_ARGS: "--skip-slow --skip-network"
+ LOCALE_OVERRIDE: "zh_CN.UTF-8"
+
+ py36_locale_slow:
+ ENV_FILE: ci/azure-36-locale_slow.yaml
+ CONDA_PY: "36"
+ CONDA_ENV: pandas
+ TEST_ARGS: "--only-slow --skip-network"
+
+ steps:
+ - script: |
+ if [ "$(uname)" == "Linux" ]; then sudo apt-get install -y libc6-dev-i386; fi
+ echo "Installing Miniconda"{
+ ci/incremental/install_miniconda.sh
+ export PATH=$HOME/miniconda3/bin:$PATH
+ echo "Setting up Conda environment"
+ ci/incremental/setup_conda_environment.sh
+ displayName: 'Before Install'
+ - script: |
+ export PATH=$HOME/miniconda3/bin:$PATH
+ ci/incremental/build.sh
+ displayName: 'Build'
+ - script: |
+ export PATH=$HOME/miniconda3/bin:$PATH
+ ci/script_single.sh
+ ci/script_multi.sh
+ echo "[Test done]"
+ displayName: 'Test'
+ - script: |
+ export PATH=$HOME/miniconda3/bin:$PATH
+ source activate pandas && pushd /tmp && python -c "import pandas; pandas.show_versions();" && popd
+ - task: PublishTestResults@2
+ inputs:
+ testResultsFiles: 'test-data-*.xml'
+ testRunTitle: 'Linux'
\ No newline at end of file
diff --git a/ci/azure/macos.yml b/ci/azure/macos.yml
index fb10d89731f26..53ce51c76683c 100644
--- a/ci/azure/macos.yml
+++ b/ci/azure/macos.yml
@@ -39,5 +39,5 @@ jobs:
source activate pandas && pushd /tmp && python -c "import pandas; pandas.show_versions();" && popd
- task: PublishTestResults@2
inputs:
- testResultsFiles: '/tmp/*.xml'
+ testResultsFiles: 'test-data-*.xml'
testRunTitle: 'MacOS-35'
diff --git a/ci/circle-35-ascii.yaml b/ci/circle-35-ascii.yaml
deleted file mode 100644
index 281ed59e2deff..0000000000000
--- a/ci/circle-35-ascii.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-name: pandas
-channels:
- - defaults
-dependencies:
- - cython>=0.28.2
- - nomkl
- - numpy
- - python-dateutil
- - python=3.5*
- - pytz
- # universal
- - pytest
- - pytest-xdist
- - pip:
- - hypothesis>=3.58.0
diff --git a/ci/install_circle.sh b/ci/circle/install_circle.sh
similarity index 100%
rename from ci/install_circle.sh
rename to ci/circle/install_circle.sh
diff --git a/ci/run_circle.sh b/ci/circle/run_circle.sh
similarity index 100%
rename from ci/run_circle.sh
rename to ci/circle/run_circle.sh
diff --git a/ci/show_circle.sh b/ci/circle/show_circle.sh
similarity index 100%
rename from ci/show_circle.sh
rename to ci/circle/show_circle.sh
diff --git a/ci/incremental/setup_conda_environment.sh b/ci/incremental/setup_conda_environment.sh
index c716a39138644..f3ac99d5e7c5a 100755
--- a/ci/incremental/setup_conda_environment.sh
+++ b/ci/incremental/setup_conda_environment.sh
@@ -27,13 +27,17 @@ set -v
# w/o removing anything else
echo
echo "[removing installed pandas]"
-conda remove pandas -y --force
-pip uninstall -y pandas
+conda remove pandas -y --force || true
+pip uninstall -y pandas || true
echo
echo "[no installed pandas]"
conda list pandas
+if [ -n "$LOCALE_OVERRIDE" ]; then
+ sudo locale-gen "$LOCALE_OVERRIDE"
+fi
+
# # Install the compiler toolchain
# if [[ $(uname) == Linux ]]; then
# if [[ "$CONDA_SUBDIR" == "linux-32" || "$BITS32" == "yes" ]] ; then
diff --git a/ci/script_multi.sh b/ci/script_multi.sh
index dcc5a14d7b3b4..e076558e8fff3 100755
--- a/ci/script_multi.sh
+++ b/ci/script_multi.sh
@@ -27,17 +27,17 @@ if [ "$DOC" ]; then
echo "We are not running pytest as this is a doc-build"
elif [ "$COVERAGE" ]; then
- echo pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
- pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
+ echo pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=test-data-multiple.xml --strict $TEST_ARGS pandas
+ pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=test-data-multiple.xml --strict $TEST_ARGS pandas
elif [ "$SLOW" ]; then
TEST_ARGS="--only-slow --skip-network"
- echo pytest -m "not single and slow" -v --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
- pytest -m "not single and slow" -v --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
+ echo pytest -m "not single and slow" -v --junitxml=test-data-multiple.xml --strict $TEST_ARGS pandas
+ pytest -m "not single and slow" -v --junitxml=test-data-multiple.xml --strict $TEST_ARGS pandas
else
- echo pytest -n 2 -m "not single" --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
- pytest -n 2 -m "not single" --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas # TODO: doctest
+ echo pytest -n 2 -m "not single" --junitxml=test-data-multiple.xml --strict $TEST_ARGS pandas
+ pytest -n 2 -m "not single" --junitxml=test-data-multiple.xml --strict $TEST_ARGS pandas # TODO: doctest
fi
diff --git a/ci/script_single.sh b/ci/script_single.sh
index 09e7446a2d876..42d326e0965ee 100755
--- a/ci/script_single.sh
+++ b/ci/script_single.sh
@@ -5,8 +5,9 @@ echo "[script_single]"
source activate pandas
if [ -n "$LOCALE_OVERRIDE" ]; then
+ echo "Setting LC_ALL and LANG to $LOCALE_OVERRIDE"
export LC_ALL="$LOCALE_OVERRIDE";
- echo "Setting LC_ALL to $LOCALE_OVERRIDE"
+ export LANG="$LOCALE_OVERRIDE";
pycmd='import pandas; print("pandas detected console encoding: %s" % pandas.get_option("display.encoding"))'
python -c "$pycmd"
@@ -25,14 +26,13 @@ if [ "$DOC" ]; then
echo "We are not running pytest as this is a doc-build"
elif [ "$COVERAGE" ]; then
- echo pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
- pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
-
+ echo pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=test-data-single.xml $TEST_ARGS pandas
+ pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=test-data-single.xml $TEST_ARGS pandas
echo pytest -s --strict scripts
pytest -s --strict scripts
else
- echo pytest -m "single" --junitxml=/tmp/single.xml --strict $TEST_ARGS pandas
- pytest -m "single" --junitxml=/tmp/single.xml --strict $TEST_ARGS pandas # TODO: doctest
+ echo pytest -m "single" --junitxml=test-data-single.xml --strict $TEST_ARGS pandas
+ pytest -m "single" --junitxml=test-data-single.xml --strict $TEST_ARGS pandas
fi
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 67b8d287d5d1a..66d545a0de6e9 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -684,7 +684,7 @@ Test-driven development/code writing
------------------------------------
*pandas* is serious about testing and strongly encourages contributors to embrace
-`test-driven development (TDD) <http://en.wikipedia.org/wiki/Test-driven_development>`_.
+`test-driven development (TDD) <https://en.wikipedia.org/wiki/Test-driven_development>`_.
This development process "relies on the repetition of a very short development cycle:
first the developer writes an (initially failing) automated test case that defines a desired
improvement or new function, then produces the minimum amount of code to pass that test."
| Closes #22396
closes #22763
cc @jreback @h-vetinari @jorisvandenbossche @WillAyd
The main thing we lose is redundancy on the sql tests. Those are down to just travis. | https://api.github.com/repos/pandas-dev/pandas/pulls/22992 | 2018-10-04T14:50:58Z | 2018-10-26T12:07:44Z | 2018-10-26T12:07:43Z | 2018-10-26T12:08:08Z |
CI: Pin IPython for doc build | diff --git a/ci/travis-36-doc.yaml b/ci/travis-36-doc.yaml
index 50626088d5bc4..8353659e7b9a9 100644
--- a/ci/travis-36-doc.yaml
+++ b/ci/travis-36-doc.yaml
@@ -12,7 +12,7 @@ dependencies:
- html5lib
- hypothesis>=3.58.0
- ipykernel
- - ipython
+ - ipython==6.5.0
- ipywidgets
- lxml
- matplotlib
| xref #22990 https://github.com/ipython/ipython/issues/11362 | https://api.github.com/repos/pandas-dev/pandas/pulls/22991 | 2018-10-04T14:21:26Z | 2018-10-04T15:15:58Z | 2018-10-04T15:15:58Z | 2018-10-05T04:21:17Z |
BUG: Perform i8 conversion for datetimelike IntervalTree queries | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index f246ebad3aa2c..42c4134437ff6 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -755,7 +755,7 @@ Interval
- Bug in the :class:`IntervalIndex` constructor where the ``closed`` parameter did not always override the inferred ``closed`` (:issue:`19370`)
- Bug in the ``IntervalIndex`` repr where a trailing comma was missing after the list of intervals (:issue:`20611`)
- Bug in :class:`Interval` where scalar arithmetic operations did not retain the ``closed`` value (:issue:`22313`)
--
+- Bug in :class:`IntervalIndex` where indexing with datetime-like values raised a ``KeyError`` (:issue:`20636`)
Indexing
^^^^^^^^
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index f72f87aeb2af6..25d4dd0cbcc81 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -6,12 +6,14 @@
from pandas.compat import add_metaclass
from pandas.core.dtypes.missing import isna
-from pandas.core.dtypes.cast import find_common_type, maybe_downcast_to_dtype
+from pandas.core.dtypes.cast import (
+ find_common_type, maybe_downcast_to_dtype, infer_dtype_from_scalar)
from pandas.core.dtypes.common import (
ensure_platform_int,
is_list_like,
is_datetime_or_timedelta_dtype,
is_datetime64tz_dtype,
+ is_dtype_equal,
is_integer_dtype,
is_float_dtype,
is_interval_dtype,
@@ -29,8 +31,8 @@
Interval, IntervalMixin, IntervalTree,
)
-from pandas.core.indexes.datetimes import date_range
-from pandas.core.indexes.timedeltas import timedelta_range
+from pandas.core.indexes.datetimes import date_range, DatetimeIndex
+from pandas.core.indexes.timedeltas import timedelta_range, TimedeltaIndex
from pandas.core.indexes.multi import MultiIndex
import pandas.core.common as com
from pandas.util._decorators import cache_readonly, Appender
@@ -192,7 +194,9 @@ def _isnan(self):
@cache_readonly
def _engine(self):
- return IntervalTree(self.left, self.right, closed=self.closed)
+ left = self._maybe_convert_i8(self.left)
+ right = self._maybe_convert_i8(self.right)
+ return IntervalTree(left, right, closed=self.closed)
def __contains__(self, key):
"""
@@ -514,6 +518,78 @@ def _maybe_cast_indexed(self, key):
return key
+ def _needs_i8_conversion(self, key):
+ """
+ Check if a given key needs i8 conversion. Conversion is necessary for
+ Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An
+ Interval-like requires conversion if it's endpoints are one of the
+ aforementioned types.
+
+ Assumes that any list-like data has already been cast to an Index.
+
+ Parameters
+ ----------
+ key : scalar or Index-like
+ The key that should be checked for i8 conversion
+
+ Returns
+ -------
+ boolean
+ """
+ if is_interval_dtype(key) or isinstance(key, Interval):
+ return self._needs_i8_conversion(key.left)
+
+ i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex)
+ return isinstance(key, i8_types)
+
+ def _maybe_convert_i8(self, key):
+ """
+ Maybe convert a given key to it's equivalent i8 value(s). Used as a
+ preprocessing step prior to IntervalTree queries (self._engine), which
+ expects numeric data.
+
+ Parameters
+ ----------
+ key : scalar or list-like
+ The key that should maybe be converted to i8.
+
+ Returns
+ -------
+ key: scalar or list-like
+ The original key if no conversion occured, int if converted scalar,
+ Int64Index if converted list-like.
+ """
+ original = key
+ if is_list_like(key):
+ key = ensure_index(key)
+
+ if not self._needs_i8_conversion(key):
+ return original
+
+ scalar = is_scalar(key)
+ if is_interval_dtype(key) or isinstance(key, Interval):
+ # convert left/right and reconstruct
+ left = self._maybe_convert_i8(key.left)
+ right = self._maybe_convert_i8(key.right)
+ constructor = Interval if scalar else IntervalIndex.from_arrays
+ return constructor(left, right, closed=self.closed)
+
+ if scalar:
+ # Timestamp/Timedelta
+ key_dtype, key_i8 = infer_dtype_from_scalar(key, pandas_dtype=True)
+ else:
+ # DatetimeIndex/TimedeltaIndex
+ key_dtype, key_i8 = key.dtype, Index(key.asi8)
+
+ # ensure consistency with IntervalIndex subtype
+ subtype = self.dtype.subtype
+ msg = ('Cannot index an IntervalIndex of subtype {subtype} with '
+ 'values of dtype {other}')
+ if not is_dtype_equal(subtype, key_dtype):
+ raise ValueError(msg.format(subtype=subtype, other=key_dtype))
+
+ return key_i8
+
def _check_method(self, method):
if method is None:
return
@@ -648,6 +724,7 @@ def get_loc(self, key, method=None):
else:
# use the interval tree
+ key = self._maybe_convert_i8(key)
if isinstance(key, Interval):
left, right = _get_interval_closed_bounds(key)
return self._engine.get_loc_interval(left, right)
@@ -711,8 +788,10 @@ def _get_reindexer(self, target):
"""
# find the left and right indexers
- lindexer = self._engine.get_indexer(target.left.values)
- rindexer = self._engine.get_indexer(target.right.values)
+ left = self._maybe_convert_i8(target.left)
+ right = self._maybe_convert_i8(target.right)
+ lindexer = self._engine.get_indexer(left.values)
+ rindexer = self._engine.get_indexer(right.values)
# we want to return an indexer on the intervals
# however, our keys could provide overlapping of multiple
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index 71f56c5bc1164..0ff5ab232d670 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -1,7 +1,9 @@
from __future__ import division
+from itertools import permutations
import pytest
import numpy as np
+import re
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, date_range, timedelta_range)
@@ -498,6 +500,48 @@ def test_get_loc_length_one(self, item, closed):
result = index.get_loc(item)
assert result == 0
+ # Make consistent with test_interval_new.py (see #16316, #16386)
+ @pytest.mark.parametrize('breaks', [
+ date_range('20180101', periods=4),
+ date_range('20180101', periods=4, tz='US/Eastern'),
+ timedelta_range('0 days', periods=4)], ids=lambda x: str(x.dtype))
+ def test_get_loc_datetimelike_nonoverlapping(self, breaks):
+ # GH 20636
+ # nonoverlapping = IntervalIndex method and no i8 conversion
+ index = IntervalIndex.from_breaks(breaks)
+
+ value = index[0].mid
+ result = index.get_loc(value)
+ expected = 0
+ assert result == expected
+
+ interval = Interval(index[0].left, index[1].right)
+ result = index.get_loc(interval)
+ expected = slice(0, 2)
+ assert result == expected
+
+ # Make consistent with test_interval_new.py (see #16316, #16386)
+ @pytest.mark.parametrize('arrays', [
+ (date_range('20180101', periods=4), date_range('20180103', periods=4)),
+ (date_range('20180101', periods=4, tz='US/Eastern'),
+ date_range('20180103', periods=4, tz='US/Eastern')),
+ (timedelta_range('0 days', periods=4),
+ timedelta_range('2 days', periods=4))], ids=lambda x: str(x[0].dtype))
+ def test_get_loc_datetimelike_overlapping(self, arrays):
+ # GH 20636
+ # overlapping = IntervalTree method with i8 conversion
+ index = IntervalIndex.from_arrays(*arrays)
+
+ value = index[0].mid + Timedelta('12 hours')
+ result = np.sort(index.get_loc(value))
+ expected = np.array([0, 1], dtype='int64')
+ assert tm.assert_numpy_array_equal(result, expected)
+
+ interval = Interval(index[0].left, index[1].right)
+ result = np.sort(index.get_loc(interval))
+ expected = np.array([0, 1, 2], dtype='int64')
+ assert tm.assert_numpy_array_equal(result, expected)
+
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
@@ -555,6 +599,97 @@ def test_get_indexer_length_one(self, item, closed):
expected = np.array([0] * len(item), dtype='intp')
tm.assert_numpy_array_equal(result, expected)
+ # Make consistent with test_interval_new.py (see #16316, #16386)
+ @pytest.mark.parametrize('arrays', [
+ (date_range('20180101', periods=4), date_range('20180103', periods=4)),
+ (date_range('20180101', periods=4, tz='US/Eastern'),
+ date_range('20180103', periods=4, tz='US/Eastern')),
+ (timedelta_range('0 days', periods=4),
+ timedelta_range('2 days', periods=4))], ids=lambda x: str(x[0].dtype))
+ def test_get_reindexer_datetimelike(self, arrays):
+ # GH 20636
+ index = IntervalIndex.from_arrays(*arrays)
+ tuples = [(index[0].left, index[0].left + pd.Timedelta('12H')),
+ (index[-1].right - pd.Timedelta('12H'), index[-1].right)]
+ target = IntervalIndex.from_tuples(tuples)
+
+ result = index._get_reindexer(target)
+ expected = np.array([0, 3], dtype='int64')
+ tm.assert_numpy_array_equal(result, expected)
+
+ @pytest.mark.parametrize('breaks', [
+ date_range('20180101', periods=4),
+ date_range('20180101', periods=4, tz='US/Eastern'),
+ timedelta_range('0 days', periods=4)], ids=lambda x: str(x.dtype))
+ def test_maybe_convert_i8(self, breaks):
+ # GH 20636
+ index = IntervalIndex.from_breaks(breaks)
+
+ # intervalindex
+ result = index._maybe_convert_i8(index)
+ expected = IntervalIndex.from_breaks(breaks.asi8)
+ tm.assert_index_equal(result, expected)
+
+ # interval
+ interval = Interval(breaks[0], breaks[1])
+ result = index._maybe_convert_i8(interval)
+ expected = Interval(breaks[0].value, breaks[1].value)
+ assert result == expected
+
+ # datetimelike index
+ result = index._maybe_convert_i8(breaks)
+ expected = Index(breaks.asi8)
+ tm.assert_index_equal(result, expected)
+
+ # datetimelike scalar
+ result = index._maybe_convert_i8(breaks[0])
+ expected = breaks[0].value
+ assert result == expected
+
+ # list-like of datetimelike scalars
+ result = index._maybe_convert_i8(list(breaks))
+ expected = Index(breaks.asi8)
+ tm.assert_index_equal(result, expected)
+
+ @pytest.mark.parametrize('breaks', [
+ np.arange(5, dtype='int64'),
+ np.arange(5, dtype='float64')], ids=lambda x: str(x.dtype))
+ @pytest.mark.parametrize('make_key', [
+ IntervalIndex.from_breaks,
+ lambda breaks: Interval(breaks[0], breaks[1]),
+ lambda breaks: breaks,
+ lambda breaks: breaks[0],
+ list], ids=['IntervalIndex', 'Interval', 'Index', 'scalar', 'list'])
+ def test_maybe_convert_i8_numeric(self, breaks, make_key):
+ # GH 20636
+ index = IntervalIndex.from_breaks(breaks)
+ key = make_key(breaks)
+
+ # no conversion occurs for numeric
+ result = index._maybe_convert_i8(key)
+ assert result is key
+
+ @pytest.mark.parametrize('breaks1, breaks2', permutations([
+ date_range('20180101', periods=4),
+ date_range('20180101', periods=4, tz='US/Eastern'),
+ timedelta_range('0 days', periods=4)], 2), ids=lambda x: str(x.dtype))
+ @pytest.mark.parametrize('make_key', [
+ IntervalIndex.from_breaks,
+ lambda breaks: Interval(breaks[0], breaks[1]),
+ lambda breaks: breaks,
+ lambda breaks: breaks[0],
+ list], ids=['IntervalIndex', 'Interval', 'Index', 'scalar', 'list'])
+ def test_maybe_convert_i8_errors(self, breaks1, breaks2, make_key):
+ # GH 20636
+ index = IntervalIndex.from_breaks(breaks1)
+ key = make_key(breaks2)
+
+ msg = ('Cannot index an IntervalIndex of subtype {dtype1} with '
+ 'values of dtype {dtype2}')
+ msg = re.escape(msg.format(dtype1=breaks1.dtype, dtype2=breaks2.dtype))
+ with tm.assert_raises_regex(ValueError, msg):
+ index._maybe_convert_i8(key)
+
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_contains(self):
# Only endpoints are valid.
| - [X] closes #20636
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
I've seen a few complaints about this on StackOverflow, so wanted to get this into 0.24.0 before I try implementing larger changes like the new behavior specs and interval accessor. | https://api.github.com/repos/pandas-dev/pandas/pulls/22988 | 2018-10-04T05:54:52Z | 2018-10-07T22:35:36Z | 2018-10-07T22:35:36Z | 2018-10-08T17:00:55Z |
BUG-22984 Fix truncation of DataFrame representations | diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 44c467795d1ed..9275357e5ad18 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1319,7 +1319,8 @@ Notice how we now instead output ``np.nan`` itself instead of a stringified form
- :func:`read_sas()` will correctly parse sas7bdat files with many columns (:issue:`22628`)
- :func:`read_sas()` will correctly parse sas7bdat files with data page types having also bit 7 set (so page type is 128 + 256 = 384) (:issue:`16615`)
- Bug in :meth:`detect_client_encoding` where potential ``IOError`` goes unhandled when importing in a mod_wsgi process due to restricted access to stdout. (:issue:`21552`)
-- Bug in :func:`to_string()` that broke column alignment when ``index=False`` and width of first column's values is greater than the width of first column's header (:issue:`16839`, :issue:`13032`)
+- Bug in :func:`DataFrame.to_string()` that broke column alignment when ``index=False`` and width of first column's values is greater than the width of first column's header (:issue:`16839`, :issue:`13032`)
+- Bug in :func:`DataFrame.to_string()` that caused representations of :class:`DataFrame` to not take up the whole window (:issue:`22984`)
- Bug in :func:`DataFrame.to_csv` where a single level MultiIndex incorrectly wrote a tuple. Now just the value of the index is written (:issue:`19589`).
- Bug in :meth:`HDFStore.append` when appending a :class:`DataFrame` with an empty string column and ``min_itemsize`` < 8 (:issue:`12242`)
- Bug in :meth:`read_csv()` in which :class:`MultiIndex` index names were being improperly handled in the cases when they were not provided (:issue:`23484`)
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 6f64605bcf175..9857129f56b0c 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -608,11 +608,6 @@ def to_string(self):
else: # max_cols == 0. Try to fit frame to terminal
text = self.adj.adjoin(1, *strcols).split('\n')
max_len = Series(text).str.len().max()
- headers = [ele[0] for ele in strcols]
- # Size of last col determines dot col size. See
- # `self._to_str_columns
- size_tr_col = len(headers[self.tr_size_col])
- max_len += size_tr_col # Need to make space for largest row
# plus truncate dot col
dif = max_len - self.w
# '+ 1' to avoid too wide repr (GH PR #17023)
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 28aa8a92cc410..b8ca8cb73c7e9 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -305,14 +305,10 @@ def test_repr_non_interactive(self):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
- def test_repr_truncates_terminal_size(self):
+ def test_repr_truncates_terminal_size(self, mock):
# https://github.com/pandas-dev/pandas/issues/21180
# TODO: use mock fixutre.
# This is being backported, so doing it directly here.
- try:
- from unittest import mock
- except ImportError:
- mock = pytest.importorskip("mock")
terminal_size = (118, 96)
p1 = mock.patch('pandas.io.formats.console.get_terminal_size',
@@ -343,6 +339,17 @@ def test_repr_truncates_terminal_size(self):
assert df2.columns[0] in result.split('\n')[0]
+ def test_repr_truncates_terminal_size_full(self, mock):
+ # GH 22984 ensure entire window is filled
+ terminal_size = (80, 24)
+ df = pd.DataFrame(np.random.rand(1, 7))
+ p1 = mock.patch('pandas.io.formats.console.get_terminal_size',
+ return_value=terminal_size)
+ p2 = mock.patch('pandas.io.formats.format.get_terminal_size',
+ return_value=terminal_size)
+ with p1, p2:
+ assert "..." not in str(df)
+
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
| - [X] closes #22984
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
When printing a DataFrame to terminal, an extra column's worth of space is added to the calculated width of the DataFrame. This is presumably to help edge cases, but the calculated difference between the DataFrame width and the terminal window width is incremented by 1 a few lines later, seemingly to fix the same problem. Do any more experienced developers know of a reason to pad the DataFrame width even more? | https://api.github.com/repos/pandas-dev/pandas/pulls/22987 | 2018-10-04T00:10:03Z | 2018-11-15T03:02:25Z | 2018-11-15T03:02:25Z | 2018-11-15T03:02:32Z |
CLN: prepare unifying hashtable.factorize and .unique; add doc-strings | diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in
index 3ff98b7b5a9b5..c061102fbaddc 100644
--- a/pandas/_libs/hashtable_class_helper.pxi.in
+++ b/pandas/_libs/hashtable_class_helper.pxi.in
@@ -355,19 +355,38 @@ cdef class {{name}}HashTable(HashTable):
return np.asarray(locs)
- def factorize(self, {{dtype}}_t values):
- uniques = {{name}}Vector()
- labels = self.get_labels(values, uniques, 0, 0)
- return uniques.to_array(), labels
-
@cython.boundscheck(False)
- def get_labels(self, const {{dtype}}_t[:] values, {{name}}Vector uniques,
- Py_ssize_t count_prior, Py_ssize_t na_sentinel,
+ @cython.wraparound(False)
+ def _factorize(self, const {{dtype}}_t[:] values, {{name}}Vector uniques,
+ Py_ssize_t count_prior=0, Py_ssize_t na_sentinel=-1,
object na_value=None):
+ """
+ Calculate unique values and labels (no sorting); ignores all NA-values
+
+ Parameters
+ ----------
+ values : ndarray[{{dtype}}]
+ Array of values of which unique will be calculated
+ uniques : {{name}}Vector
+ Vector into which uniques will be written
+ count_prior : Py_ssize_t, default 0
+ Number of existing entries in uniques
+ na_sentinel : Py_ssize_t, default -1
+ Sentinel value used for all NA-values in inverse
+ na_value : object, default None
+ Value to identify as missing. If na_value is None, then
+ any value satisfying val!=val are considered missing.
+
+ Returns
+ -------
+ uniques : ndarray[{{dtype}}]
+ Unique values of input, not sorted
+ labels : ndarray[int64]
+ The labels from values to uniques
+ """
cdef:
- Py_ssize_t i, n = len(values)
+ Py_ssize_t i, idx, count = count_prior, n = len(values)
int64_t[:] labels
- Py_ssize_t idx, count = count_prior
int ret = 0
{{dtype}}_t val, na_value2
khiter_t k
@@ -399,9 +418,11 @@ cdef class {{name}}HashTable(HashTable):
k = kh_get_{{dtype}}(self.table, val)
if k != self.table.n_buckets:
+ # k falls into a previous bucket
idx = self.table.vals[k]
labels[i] = idx
else:
+ # k hasn't been seen yet
k = kh_put_{{dtype}}(self.table, val, &ret)
self.table.vals[k] = count
@@ -418,6 +439,19 @@ cdef class {{name}}HashTable(HashTable):
return np.asarray(labels)
+ def factorize(self, const {{dtype}}_t[:] values, Py_ssize_t na_sentinel=-1,
+ object na_value=None):
+ uniques = {{name}}Vector()
+ labels = self._factorize(values, uniques=uniques,
+ na_sentinel=na_sentinel, na_value=na_value)
+ return labels, uniques.to_array()
+
+ def get_labels(self, const {{dtype}}_t[:] values, {{name}}Vector uniques,
+ Py_ssize_t count_prior=0, Py_ssize_t na_sentinel=-1,
+ object na_value=None):
+ return self._factorize(values, uniques, count_prior=count_prior,
+ na_sentinel=na_sentinel, na_value=na_value)
+
@cython.boundscheck(False)
def get_labels_groupby(self, const {{dtype}}_t[:] values):
cdef:
@@ -464,7 +498,21 @@ cdef class {{name}}HashTable(HashTable):
return np.asarray(labels), arr_uniques
@cython.boundscheck(False)
+ @cython.wraparound(False)
def unique(self, const {{dtype}}_t[:] values):
+ """
+ Calculate unique values without sorting
+
+ Parameters
+ ----------
+ values : ndarray[{{dtype}}]
+ Array of values of which unique will be calculated
+
+ Returns
+ -------
+ uniques : ndarray[{{dtype}}]
+ Unique values of input, not sorted
+ """
cdef:
Py_ssize_t i, n = len(values)
int ret = 0
@@ -567,7 +615,21 @@ cdef class StringHashTable(HashTable):
return labels
@cython.boundscheck(False)
+ @cython.wraparound(False)
def unique(self, ndarray[object] values):
+ """
+ Calculate unique values without sorting
+
+ Parameters
+ ----------
+ values : ndarray[object]
+ Array of values of which unique will be calculated
+
+ Returns
+ -------
+ uniques : ndarray[object]
+ Unique values of input, not sorted
+ """
cdef:
Py_ssize_t i, count, n = len(values)
int64_t[:] uindexer
@@ -602,11 +664,6 @@ cdef class StringHashTable(HashTable):
uniques.append(values[uindexer[i]])
return uniques.to_array()
- def factorize(self, ndarray[object] values):
- uniques = ObjectVector()
- labels = self.get_labels(values, uniques, 0, 0)
- return uniques.to_array(), labels
-
@cython.boundscheck(False)
def lookup(self, ndarray[object] values):
cdef:
@@ -669,14 +726,37 @@ cdef class StringHashTable(HashTable):
free(vecs)
@cython.boundscheck(False)
- def get_labels(self, ndarray[object] values, ObjectVector uniques,
- Py_ssize_t count_prior, int64_t na_sentinel,
+ @cython.wraparound(False)
+ def _factorize(self, ndarray[object] values, ObjectVector uniques,
+ Py_ssize_t count_prior=0, Py_ssize_t na_sentinel=-1,
object na_value=None):
+ """
+ Calculate unique values and labels (no sorting); ignores all NA-values
+
+ Parameters
+ ----------
+ values : ndarray[object]
+ Array of values of which unique will be calculated
+ uniques : ObjectVector
+ Vector into which uniques will be written
+ count_prior : Py_ssize_t, default 0
+ Number of existing entries in uniques
+ na_sentinel : Py_ssize_t, default -1
+ Sentinel value used for all NA-values in inverse
+ na_value : object, default None
+ Value to identify as missing
+
+ Returns
+ -------
+ uniques : ndarray[object]
+ Unique values of input, not sorted
+ labels : ndarray[int64]
+ The labels from values to uniques
+ """
cdef:
- Py_ssize_t i, n = len(values)
+ Py_ssize_t i, idx, count = count_prior, n = len(values)
int64_t[:] labels
int64_t[:] uindexer
- Py_ssize_t idx, count = count_prior
int ret = 0
object val
const char *v
@@ -684,19 +764,17 @@ cdef class StringHashTable(HashTable):
khiter_t k
bint use_na_value
- # these by-definition *must* be strings
labels = np.zeros(n, dtype=np.int64)
uindexer = np.empty(n, dtype=np.int64)
use_na_value = na_value is not None
- # pre-filter out missing
- # and assign pointers
+ # assign pointers and pre-filter out missing
vecs = <const char **> malloc(n * sizeof(char *))
for i in range(n):
val = values[i]
- if ((PyUnicode_Check(val) or PyString_Check(val)) and
- not (use_na_value and val == na_value)):
+ if ((PyUnicode_Check(val) or PyString_Check(val))
+ and not (use_na_value and val == na_value)):
v = util.get_c_string(val)
vecs[i] = v
else:
@@ -711,9 +789,11 @@ cdef class StringHashTable(HashTable):
v = vecs[i]
k = kh_get_str(self.table, v)
if k != self.table.n_buckets:
+ # k falls into a previous bucket
idx = self.table.vals[k]
labels[i] = <int64_t>idx
else:
+ # k hasn't been seen yet
k = kh_put_str(self.table, v, &ret)
self.table.vals[k] = count
uindexer[count] = i
@@ -728,6 +808,19 @@ cdef class StringHashTable(HashTable):
return np.asarray(labels)
+ def factorize(self, ndarray[object] values, Py_ssize_t na_sentinel=-1,
+ object na_value=None):
+ uniques = ObjectVector()
+ labels = self._factorize(values, uniques=uniques,
+ na_sentinel=na_sentinel, na_value=na_value)
+ return labels, uniques.to_array()
+
+ def get_labels(self, ndarray[object] values, ObjectVector uniques,
+ Py_ssize_t count_prior=0, Py_ssize_t na_sentinel=-1,
+ object na_value=None):
+ return self._factorize(values, uniques, count_prior=count_prior,
+ na_sentinel=na_sentinel, na_value=na_value)
+
cdef class PyObjectHashTable(HashTable):
@@ -814,7 +907,22 @@ cdef class PyObjectHashTable(HashTable):
return np.asarray(locs)
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
def unique(self, ndarray[object] values):
+ """
+ Calculate unique values without sorting
+
+ Parameters
+ ----------
+ values : ndarray[object]
+ Array of values of which unique will be calculated
+
+ Returns
+ -------
+ uniques : ndarray[object]
+ Unique values of input, not sorted
+ """
cdef:
Py_ssize_t i, n = len(values)
int ret = 0
@@ -832,13 +940,38 @@ cdef class PyObjectHashTable(HashTable):
return uniques.to_array()
- def get_labels(self, ndarray[object] values, ObjectVector uniques,
- Py_ssize_t count_prior, int64_t na_sentinel,
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def _factorize(self, ndarray[object] values, ObjectVector uniques,
+ Py_ssize_t count_prior=0, Py_ssize_t na_sentinel=-1,
object na_value=None):
+ """
+ Calculate unique values and labels (no sorting); ignores all NA-values
+
+ Parameters
+ ----------
+ values : ndarray[object]
+ Array of values of which unique will be calculated
+ uniques : ObjectVector
+ Vector into which uniques will be written
+ count_prior : Py_ssize_t, default 0
+ Number of existing entries in uniques
+ na_sentinel : Py_ssize_t, default -1
+ Sentinel value used for all NA-values in inverse
+ na_value : object, default None
+ Value to identify as missing. If na_value is None, then None _plus_
+ any value satisfying val!=val are considered missing.
+
+ Returns
+ -------
+ uniques : ndarray[object]
+ Unique values of input, not sorted
+ labels : ndarray[int64]
+ The labels from values to uniques
+ """
cdef:
- Py_ssize_t i, n = len(values)
+ Py_ssize_t i, idx, count = count_prior, n = len(values)
int64_t[:] labels
- Py_ssize_t idx, count = count_prior
int ret = 0
object val
khiter_t k
@@ -851,16 +984,18 @@ cdef class PyObjectHashTable(HashTable):
val = values[i]
hash(val)
- if ((val != val or val is None) or
- (use_na_value and val == na_value)):
+ if ((val != val or val is None)
+ or (use_na_value and val == na_value)):
labels[i] = na_sentinel
continue
k = kh_get_pymap(self.table, <PyObject*>val)
if k != self.table.n_buckets:
+ # k falls into a previous bucket
idx = self.table.vals[k]
labels[i] = idx
else:
+ # k hasn't been seen yet
k = kh_put_pymap(self.table, <PyObject*>val, &ret)
self.table.vals[k] = count
uniques.append(val)
@@ -868,3 +1003,16 @@ cdef class PyObjectHashTable(HashTable):
count += 1
return np.asarray(labels)
+
+ def factorize(self, ndarray[object] values, Py_ssize_t na_sentinel=-1,
+ object na_value=None):
+ uniques = ObjectVector()
+ labels = self._factorize(values, uniques=uniques,
+ na_sentinel=na_sentinel, na_value=na_value)
+ return labels, uniques.to_array()
+
+ def get_labels(self, ndarray[object] values, ObjectVector uniques,
+ Py_ssize_t count_prior=0, Py_ssize_t na_sentinel=-1,
+ object na_value=None):
+ return self._factorize(values, uniques, count_prior=count_prior,
+ na_sentinel=na_sentinel, na_value=na_value)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index cb9ffc4bd0fd5..0f1eb12883fd5 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -467,15 +467,13 @@ def _factorize_array(values, na_sentinel=-1, size_hint=None,
-------
labels, uniques : ndarray
"""
- (hash_klass, vec_klass), values = _get_data_algo(values, _hashtables)
+ (hash_klass, _), values = _get_data_algo(values, _hashtables)
table = hash_klass(size_hint or len(values))
- uniques = vec_klass()
- labels = table.get_labels(values, uniques, 0, na_sentinel,
- na_value=na_value)
+ labels, uniques = table.factorize(values, na_sentinel=na_sentinel,
+ na_value=na_value)
labels = ensure_platform_int(labels)
- uniques = uniques.to_array()
return labels, uniques
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 1fd801c68fdde..557669260604a 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -15,7 +15,6 @@
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
-from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
@@ -228,19 +227,53 @@ def test_complex_sorting(self):
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
+ def test_float64_factorize(self, writable):
+ data = np.array([1.0, 1e8, 1.0, 1e-8, 1e8, 1.0], dtype=np.float64)
+ data.setflags(write=writable)
+ exp_labels = np.array([0, 1, 0, 2, 1, 0], dtype=np.intp)
+ exp_uniques = np.array([1.0, 1e8, 1e-8], dtype=np.float64)
+
+ labels, uniques = algos.factorize(data)
+ tm.assert_numpy_array_equal(labels, exp_labels)
+ tm.assert_numpy_array_equal(uniques, exp_uniques)
+
def test_uint64_factorize(self, writable):
- data = np.array([2**63, 1, 2**63], dtype=np.uint64)
+ data = np.array([2**64 - 1, 1, 2**64 - 1], dtype=np.uint64)
data.setflags(write=writable)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
- exp_uniques = np.array([2**63, 1], dtype=np.uint64)
+ exp_uniques = np.array([2**64 - 1, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
- data = np.array([2**63, -1, 2**63], dtype=object)
+ def test_int64_factorize(self, writable):
+ data = np.array([2**63 - 1, -2**63, 2**63 - 1], dtype=np.int64)
+ data.setflags(write=writable)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
- exp_uniques = np.array([2**63, -1], dtype=object)
+ exp_uniques = np.array([2**63 - 1, -2**63], dtype=np.int64)
+
+ labels, uniques = algos.factorize(data)
+ tm.assert_numpy_array_equal(labels, exp_labels)
+ tm.assert_numpy_array_equal(uniques, exp_uniques)
+
+ def test_string_factorize(self, writable):
+ data = np.array(['a', 'c', 'a', 'b', 'c'],
+ dtype=object)
+ data.setflags(write=writable)
+ exp_labels = np.array([0, 1, 0, 2, 1], dtype=np.intp)
+ exp_uniques = np.array(['a', 'c', 'b'], dtype=object)
+
+ labels, uniques = algos.factorize(data)
+ tm.assert_numpy_array_equal(labels, exp_labels)
+ tm.assert_numpy_array_equal(uniques, exp_uniques)
+
+ def test_object_factorize(self, writable):
+ data = np.array(['a', 'c', None, np.nan, 'a', 'b', pd.NaT, 'c'],
+ dtype=object)
+ data.setflags(write=writable)
+ exp_labels = np.array([0, 1, -1, -1, 0, 2, -1, 1], dtype=np.intp)
+ exp_uniques = np.array(['a', 'c', 'b'], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
@@ -1262,41 +1295,107 @@ def test_get_unique(self):
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(s.unique(), exp)
- def test_vector_resize(self, writable):
+ @pytest.mark.parametrize('nvals', [0, 10]) # resizing to 0 is special case
+ @pytest.mark.parametrize('htable, uniques, dtype, safely_resizes', [
+ (ht.PyObjectHashTable, ht.ObjectVector, 'object', False),
+ (ht.StringHashTable, ht.ObjectVector, 'object', True),
+ (ht.Float64HashTable, ht.Float64Vector, 'float64', False),
+ (ht.Int64HashTable, ht.Int64Vector, 'int64', False),
+ (ht.UInt64HashTable, ht.UInt64Vector, 'uint64', False)])
+ def test_vector_resize(self, writable, htable, uniques, dtype,
+ safely_resizes, nvals):
# Test for memory errors after internal vector
- # reallocations (pull request #7157)
-
- def _test_vector_resize(htable, uniques, dtype, nvals, safely_resizes):
- vals = np.array(np.random.randn(1000), dtype=dtype)
- # GH 21688 ensure we can deal with readonly memory views
- vals.setflags(write=writable)
- # get_labels may append to uniques
- htable.get_labels(vals[:nvals], uniques, 0, -1)
- # to_array() set an external_view_exists flag on uniques.
- tmp = uniques.to_array()
- oldshape = tmp.shape
- # subsequent get_labels() calls can no longer append to it
- # (for all but StringHashTables + ObjectVector)
- if safely_resizes:
+ # reallocations (GH 7157)
+ vals = np.array(np.random.randn(1000), dtype=dtype)
+
+ # GH 21688 ensures we can deal with read-only memory views
+ vals.setflags(write=writable)
+
+ # initialise instances; cannot initialise in parametrization,
+ # as otherwise external views would be held on the array (which is
+ # one of the things this test is checking)
+ htable = htable()
+ uniques = uniques()
+
+ # get_labels may append to uniques
+ htable.get_labels(vals[:nvals], uniques, 0, -1)
+ # to_array() sets an external_view_exists flag on uniques.
+ tmp = uniques.to_array()
+ oldshape = tmp.shape
+
+ # subsequent get_labels() calls can no longer append to it
+ # (except for StringHashTables + ObjectVector)
+ if safely_resizes:
+ htable.get_labels(vals, uniques, 0, -1)
+ else:
+ with tm.assert_raises_regex(ValueError, 'external reference.*'):
htable.get_labels(vals, uniques, 0, -1)
- else:
- with pytest.raises(ValueError) as excinfo:
- htable.get_labels(vals, uniques, 0, -1)
- assert str(excinfo.value).startswith('external reference')
- uniques.to_array() # should not raise here
- assert tmp.shape == oldshape
-
- test_cases = [
- (ht.PyObjectHashTable, ht.ObjectVector, 'object', False),
- (ht.StringHashTable, ht.ObjectVector, 'object', True),
- (ht.Float64HashTable, ht.Float64Vector, 'float64', False),
- (ht.Int64HashTable, ht.Int64Vector, 'int64', False),
- (ht.UInt64HashTable, ht.UInt64Vector, 'uint64', False)]
-
- for (tbl, vect, dtype, safely_resizes) in test_cases:
- # resizing to empty is a special case
- _test_vector_resize(tbl(), vect(), dtype, 0, safely_resizes)
- _test_vector_resize(tbl(), vect(), dtype, 10, safely_resizes)
+
+ uniques.to_array() # should not raise here
+ assert tmp.shape == oldshape
+
+ @pytest.mark.parametrize('htable, tm_dtype', [
+ (ht.PyObjectHashTable, 'String'),
+ (ht.StringHashTable, 'String'),
+ (ht.Float64HashTable, 'Float'),
+ (ht.Int64HashTable, 'Int'),
+ (ht.UInt64HashTable, 'UInt')])
+ def test_hashtable_unique(self, htable, tm_dtype, writable):
+ # output of maker has guaranteed unique elements
+ maker = getattr(tm, 'make' + tm_dtype + 'Index')
+ s = Series(maker(1000))
+ if htable == ht.Float64HashTable:
+ # add NaN for float column
+ s.loc[500] = np.nan
+ elif htable == ht.PyObjectHashTable:
+ # use different NaN types for object column
+ s.loc[500:502] = [np.nan, None, pd.NaT]
+
+ # create duplicated selection
+ s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True)
+ s_duplicated.values.setflags(write=writable)
+
+ # drop_duplicates has own cython code (hash_table_func_helper.pxi)
+ # and is tested separately; keeps first occurrence like ht.unique()
+ expected_unique = s_duplicated.drop_duplicates(keep='first').values
+ result_unique = htable().unique(s_duplicated.values)
+ tm.assert_numpy_array_equal(result_unique, expected_unique)
+
+ @pytest.mark.parametrize('htable, tm_dtype', [
+ (ht.PyObjectHashTable, 'String'),
+ (ht.StringHashTable, 'String'),
+ (ht.Float64HashTable, 'Float'),
+ (ht.Int64HashTable, 'Int'),
+ (ht.UInt64HashTable, 'UInt')])
+ def test_hashtable_factorize(self, htable, tm_dtype, writable):
+ # output of maker has guaranteed unique elements
+ maker = getattr(tm, 'make' + tm_dtype + 'Index')
+ s = Series(maker(1000))
+ if htable == ht.Float64HashTable:
+ # add NaN for float column
+ s.loc[500] = np.nan
+ elif htable == ht.PyObjectHashTable:
+ # use different NaN types for object column
+ s.loc[500:502] = [np.nan, None, pd.NaT]
+
+ # create duplicated selection
+ s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True)
+ s_duplicated.values.setflags(write=writable)
+ na_mask = s_duplicated.isna().values
+
+ result_inverse, result_unique = htable().factorize(s_duplicated.values)
+
+ # drop_duplicates has own cython code (hash_table_func_helper.pxi)
+ # and is tested separately; keeps first occurrence like ht.factorize()
+ # since factorize removes all NaNs, we do the same here
+ expected_unique = s_duplicated.dropna().drop_duplicates().values
+ tm.assert_numpy_array_equal(result_unique, expected_unique)
+
+ # reconstruction can only succeed if the inverse is correct. Since
+ # factorize removes the NaNs, those have to be excluded here as well
+ result_reconstruct = result_unique[result_inverse[~na_mask]]
+ expected_reconstruct = s_duplicated.dropna().values
+ tm.assert_numpy_array_equal(result_reconstruct, expected_reconstruct)
def test_quantile():
@@ -1311,14 +1410,14 @@ def test_unique_label_indices():
a = np.random.randint(1, 1 << 10, 1 << 15).astype('i8')
- left = unique_label_indices(a)
+ left = ht.unique_label_indices(a)
right = np.unique(a, return_index=True)[1]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
a[np.random.choice(len(a), 10)] = -1
- left = unique_label_indices(a)
+ left = ht.unique_label_indices(a)
right = np.unique(a, return_index=True)[1][1:]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
| For adding a `return_inverse`-kwarg to `unique` (#21357 / #21645), I originally didn't want to have to add something to the templated cython code, preferring to add a solution using `numpy.unique` as a first step (with the idea of improving the performance later).
@jorisvandenbossche then remarked:
> Which led me think: `pd.unique` with a `return_inverse` argument is actually basically the same as `pd.factorize`?
I didn't know what `factorize` was doing, so I had no idea about this connection. From looking at the cython code in `hashtable_class_helper.pxi.in`, I found that there's substantial overlap between `unique` and `get_labels` (the core of `factorize`) - the only difference is the handling of `NaN/None/etc.`
I think that these could/should be unified for less code duplication. Alternatively, the first commit of this PR could be split off into a separate PR - it is *just* adding a `return_inverse`-kwarg to `unique` -- though effectively by duplicating the `factorize` code.
Both variants pass the the test suite locally, the only question is whether there's appreciable differences in the ASVs. Since the only extra complexity of unifying the code is reading the value of some `bint` within the loop, I don't think that it'll be relevant. I got some noisy data the first time 'round, will let it run again when I have the possibility.
So long story short, this PR prepares the hashtable-backend to support `return_inverse=True`, which plays into #21357 #21645 #22824, and will also allow to easily solve #21720.
Side note: this already builds on #22978 to be able to run the ASVs at all. | https://api.github.com/repos/pandas-dev/pandas/pulls/22986 | 2018-10-03T22:47:17Z | 2018-10-18T15:50:49Z | 2018-10-18T15:50:49Z | 2018-10-22T14:16:36Z |
CLN: values is required argument in _shallow_copy_with_infer | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index af04a846ed787..51c84d6e28cb4 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -530,7 +530,7 @@ def _shallow_copy(self, values=None, **kwargs):
return self._simple_new(values, **attributes)
- def _shallow_copy_with_infer(self, values=None, **kwargs):
+ def _shallow_copy_with_infer(self, values, **kwargs):
"""
create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
@@ -543,8 +543,6 @@ def _shallow_copy_with_infer(self, values=None, **kwargs):
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
- if values is None:
- values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 3e6b934e1e863..822c0b864059c 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -555,7 +555,7 @@ def view(self, cls=None):
result._id = self._id
return result
- def _shallow_copy_with_infer(self, values=None, **kwargs):
+ def _shallow_copy_with_infer(self, values, **kwargs):
# On equal MultiIndexes the difference is empty.
# Therefore, an empty MultiIndex is returned GH13490
if len(values) == 0:
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 969391569ce50..cc008694a8b84 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -287,7 +287,7 @@ def _from_ordinals(cls, values, name=None, freq=None, **kwargs):
result._reset_identity()
return result
- def _shallow_copy_with_infer(self, values=None, **kwargs):
+ def _shallow_copy_with_infer(self, values, **kwargs):
""" we always want to return a PeriodIndex """
return self._shallow_copy(values=values, **kwargs)
| @TomAugspurger @jbrockmendel From looking at the code related to the discussion in https://github.com/pandas-dev/pandas/pull/22961, I noticed `_shallow_copy_with_infer` has the potential to not pass any values, which is never actually used in pandas. So a small code clean-up | https://api.github.com/repos/pandas-dev/pandas/pulls/22983 | 2018-10-03T19:55:24Z | 2018-10-04T11:16:28Z | 2018-10-04T11:16:28Z | 2018-10-04T16:57:29Z |
Merge asof float fix | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index f246ebad3aa2c..bded5c1b644e9 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -847,6 +847,7 @@ Reshaping
- Bug in :meth:`DataFrame.drop_duplicates` for empty ``DataFrame`` which incorrectly raises an error (:issue:`20516`)
- Bug in :func:`pandas.wide_to_long` when a string is passed to the stubnames argument and a column name is a substring of that stubname (:issue:`22468`)
- Bug in :func:`merge` when merging ``datetime64[ns, tz]`` data that contained a DST transition (:issue:`18885`)
+- Bug in :func:`merge_asof` when merging on float values within defined tolerance (:issue:`22981`)
Build Changes
^^^^^^^^^^^^^
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index c4305136accb1..d0c7b66978661 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -23,6 +23,7 @@
is_categorical_dtype,
is_integer_dtype,
is_float_dtype,
+ is_number,
is_numeric_dtype,
is_integer,
is_int_or_datetime_dtype,
@@ -1356,8 +1357,14 @@ def _get_merge_keys(self):
if self.tolerance < 0:
raise MergeError("tolerance must be positive")
+ elif is_float_dtype(lt):
+ if not is_number(self.tolerance):
+ raise MergeError(msg)
+ if self.tolerance < 0:
+ raise MergeError("tolerance must be positive")
+
else:
- raise MergeError("key must be integer or timestamp")
+ raise MergeError("key must be integer, timestamp or float")
# validate allow_exact_matches
if not is_bool(self.allow_exact_matches):
diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py
index d5df9d3820fdc..c75a6a707cafc 100644
--- a/pandas/tests/reshape/merge/test_merge_asof.py
+++ b/pandas/tests/reshape/merge/test_merge_asof.py
@@ -642,6 +642,21 @@ def test_tolerance_tz(self):
'value2': list("BCDEE")})
assert_frame_equal(result, expected)
+ def test_tolerance_float(self):
+ # GH22981
+ left = pd.DataFrame({'a': [1.1, 3.5, 10.9],
+ 'left_val': ['a', 'b', 'c']})
+ right = pd.DataFrame({'a': [1.0, 2.5, 3.3, 7.5, 11.5],
+ 'right_val': [1.0, 2.5, 3.3, 7.5, 11.5]})
+
+ expected = pd.DataFrame({'a': [1.1, 3.5, 10.9],
+ 'left_val': ['a', 'b', 'c'],
+ 'right_val': [1, 3.3, np.nan]})
+
+ result = pd.merge_asof(left, right, on='a', direction='nearest',
+ tolerance=0.5)
+ assert_frame_equal(result, expected)
+
def test_index_tolerance(self):
# GH 15135
expected = self.tolerance.set_index('time')
| Fix bug with merge_asof when merging on floats and selecting a tolerance
- [ ] closes #22981
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22982 | 2018-10-03T19:08:28Z | 2018-10-04T23:29:07Z | 2018-10-04T23:29:07Z | 2018-10-17T06:02:50Z |
DOC: Documentation for pandas.core.indexes.api | diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py
index e50a4b099a8e1..4929710d416e7 100644
--- a/pandas/core/indexes/api.py
+++ b/pandas/core/indexes/api.py
@@ -44,9 +44,28 @@
def _get_objs_combined_axis(objs, intersect=False, axis=0, sort=True):
- # Extract combined index: return intersection or union (depending on the
- # value of "intersect") of indexes on given axis, or None if all objects
- # lack indexes (e.g. they are numpy arrays)
+ """
+ Extract combined index: return intersection or union (depending on the
+ value of "intersect") of indexes on given axis, or None if all objects
+ lack indexes (e.g. they are numpy arrays).
+
+ Parameters
+ ----------
+ objs : list of objects
+ Each object will only be considered if it has a _get_axis
+ attribute.
+ intersect : bool, default False
+ If True, calculate the intersection between indexes. Otherwise,
+ calculate the union.
+ axis : {0 or 'index', 1 or 'outer'}, default 0
+ The axis to extract indexes from.
+ sort : bool, default True
+ Whether the result index should come out sorted or not.
+
+ Returns
+ -------
+ Index
+ """
obs_idxes = [obj._get_axis(axis) for obj in objs
if hasattr(obj, '_get_axis')]
if obs_idxes:
@@ -54,6 +73,24 @@ def _get_objs_combined_axis(objs, intersect=False, axis=0, sort=True):
def _get_combined_index(indexes, intersect=False, sort=False):
+ """
+ Return the union or intersection of indexes.
+
+ Parameters
+ ----------
+ indexes : list of Index or list objects
+ When intersect=True, do not accept list of lists.
+ intersect : bool, default False
+ If True, calculate the intersection between indexes. Otherwise,
+ calculate the union.
+ sort : bool, default False
+ Whether the result index should come out sorted or not.
+
+ Returns
+ -------
+ Index
+ """
+
# TODO: handle index names!
indexes = com.get_distinct_objs(indexes)
if len(indexes) == 0:
@@ -77,6 +114,21 @@ def _get_combined_index(indexes, intersect=False, sort=False):
def _union_indexes(indexes, sort=True):
+ """
+ Return the union of indexes.
+
+ The behavior of sort and names is not consistent.
+
+ Parameters
+ ----------
+ indexes : list of Index or list objects
+ sort : bool, default True
+ Whether the result index should come out sorted or not.
+
+ Returns
+ -------
+ Index
+ """
if len(indexes) == 0:
raise AssertionError('Must have at least 1 Index to union')
if len(indexes) == 1:
@@ -88,6 +140,19 @@ def _union_indexes(indexes, sort=True):
indexes, kind = _sanitize_and_check(indexes)
def _unique_indices(inds):
+ """
+ Convert indexes to lists and concatenate them, removing duplicates.
+
+ The final dtype is inferred.
+
+ Parameters
+ ----------
+ inds : list of Index or list objects
+
+ Returns
+ -------
+ Index
+ """
def conv(i):
if isinstance(i, Index):
i = i.tolist()
@@ -126,6 +191,26 @@ def conv(i):
def _sanitize_and_check(indexes):
+ """
+ Verify the type of indexes and convert lists to Index.
+
+ Cases:
+
+ - [list, list, ...]: Return ([list, list, ...], 'list')
+ - [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...])
+ Lists are sorted and converted to Index.
+ - [Index, Index, ...]: Return ([Index, Index, ...], TYPE)
+ TYPE = 'special' if at least one special type, 'array' otherwise.
+
+ Parameters
+ ----------
+ indexes : list of Index or list objects
+
+ Returns
+ -------
+ sanitized_indexes : list of Index or list objects
+ type : {'list', 'array', 'special'}
+ """
kinds = list({type(index) for index in indexes})
if list in kinds:
@@ -144,6 +229,21 @@ def _sanitize_and_check(indexes):
def _get_consensus_names(indexes):
+ """
+ Give a consensus 'names' to indexes.
+
+ If there's exactly one non-empty 'names', return this,
+ otherwise, return empty.
+
+ Parameters
+ ----------
+ indexes : list of Index objects
+
+ Returns
+ -------
+ list
+ A list representing the consensus 'names' found.
+ """
# find the non-none names, need to tupleify to make
# the set hashable, then reverse on return
@@ -155,6 +255,18 @@ def _get_consensus_names(indexes):
def _all_indexes_same(indexes):
+ """
+ Determine if all indexes contain the same elements.
+
+ Parameters
+ ----------
+ indexes : list of Index objects
+
+ Returns
+ -------
+ bool
+ True if all indexes contain the same elements, False otherwise.
+ """
first = indexes[0]
for index in indexes[1:]:
if not first.equals(index):
| The functions on pandas.core.indexes.api had almost no documentation. This is an attempt to explain them better and a progress towards #22915. | https://api.github.com/repos/pandas-dev/pandas/pulls/22980 | 2018-10-03T18:52:47Z | 2018-11-04T09:56:13Z | 2018-11-04T09:56:13Z | 2018-11-04T09:56:28Z |
Skip non-numeric columns when making a boxplot | diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index ec6743e205848..64047947ec38f 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -15,7 +15,8 @@
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.dtypes.common import (
- is_hashable, is_integer, is_iterator, is_list_like, is_number)
+ is_datetimelike, is_hashable, is_integer, is_iterator, is_list_like,
+ is_number)
from pandas.core.dtypes.generic import (
ABCDataFrame, ABCIndexClass, ABCMultiIndex, ABCPeriodIndex, ABCSeries)
from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike
@@ -1566,6 +1567,11 @@ def __init__(self, data, return_type='axes', **kwargs):
"return_type must be {None, 'axes', 'dict', 'both'}")
self.return_type = return_type
+ if isinstance(data, ABCDataFrame):
+ data = data.select_dtypes(include='number')
+ elif is_datetimelike(data):
+ raise ValueError("Box plot is not implemented for datetime "
+ "array-like objects")
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 4e047cd44c1e2..874a30d214a1c 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -1501,6 +1501,15 @@ def test_boxplot_subplots_return_type(self):
expected_keys=['height', 'weight', 'category'],
check_ax_title=False)
+ @pytest.mark.slow
+ def test_boxplot_datetime(self):
+ # Regression test for pandas issue #22799
+ df = pd.DataFrame({'a': pd.date_range("2012-01-01", periods=100),
+ 'b': np.random.randn(100),
+ 'c': np.random.randn(100) + 2})
+ self._check_data(df.plot(kind='box'),
+ df.loc[:, ['b', 'c']].plot(kind='box'))
+
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_df(self):
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index cc8aa2018b1a0..23be7ea2a9477 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -675,6 +675,12 @@ def test_boxplot_series(self):
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
+ @pytest.mark.slow
+ def test_boxplot_datetime(self):
+ s = Series(date_range("2012-01-01", periods=100))
+ with pytest.raises(ValueError):
+ s.plot(kind='box')
+
@pytest.mark.slow
def test_kind_both_ways(self):
s = Series(range(3))
| This PR updates the `BoxPlot` class to skip non-numeric columns when making a DataFrame boxplot. Also raises a more meaningful error message when trying to plot a datetime-like Series.
Closes #22799.
- [x] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22979 | 2018-10-03T17:45:19Z | 2019-02-06T03:32:32Z | null | 2019-02-06T03:32:32Z |
Fix ASV import error | diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py
index c5b147b152aa6..2850fa249725c 100644
--- a/asv_bench/benchmarks/indexing.py
+++ b/asv_bench/benchmarks/indexing.py
@@ -2,10 +2,10 @@
import numpy as np
import pandas.util.testing as tm
-from pandas import (Series, DataFrame, MultiIndex, Int64Index, Float64Index,
- IntervalIndex, CategoricalIndex,
- IndexSlice, concat, date_range)
-from .pandas_vb_common import setup, Panel # noqa
+from pandas import (Series, DataFrame, MultiIndex, Panel,
+ Int64Index, Float64Index, IntervalIndex,
+ CategoricalIndex, IndexSlice, concat, date_range)
+from .pandas_vb_common import setup # noqa
class NumericSeriesIndexing(object):
diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py
index 7487a0d8489b7..6624c3d0aaf49 100644
--- a/asv_bench/benchmarks/join_merge.py
+++ b/asv_bench/benchmarks/join_merge.py
@@ -3,14 +3,15 @@
import numpy as np
import pandas.util.testing as tm
-from pandas import (DataFrame, Series, MultiIndex, date_range, concat, merge,
- merge_asof)
+from pandas import (DataFrame, Series, Panel, MultiIndex,
+ date_range, concat, merge, merge_asof)
+
try:
from pandas import merge_ordered
except ImportError:
from pandas import ordered_merge as merge_ordered
-from .pandas_vb_common import Panel, setup # noqa
+from .pandas_vb_common import setup # noqa
class Append(object):
diff --git a/asv_bench/benchmarks/panel_ctor.py b/asv_bench/benchmarks/panel_ctor.py
index ce946c76ed199..4614bbd198afa 100644
--- a/asv_bench/benchmarks/panel_ctor.py
+++ b/asv_bench/benchmarks/panel_ctor.py
@@ -1,9 +1,9 @@
import warnings
from datetime import datetime, timedelta
-from pandas import DataFrame, DatetimeIndex, date_range
+from pandas import DataFrame, Panel, DatetimeIndex, date_range
-from .pandas_vb_common import Panel, setup # noqa
+from .pandas_vb_common import setup # noqa
class DifferentIndexes(object):
diff --git a/asv_bench/benchmarks/panel_methods.py b/asv_bench/benchmarks/panel_methods.py
index a5b1a92e9cf67..4d19e9a87c507 100644
--- a/asv_bench/benchmarks/panel_methods.py
+++ b/asv_bench/benchmarks/panel_methods.py
@@ -1,8 +1,9 @@
import warnings
import numpy as np
+from pandas import Panel
-from .pandas_vb_common import Panel, setup # noqa
+from .pandas_vb_common import setup # noqa
class PanelMethods(object):
| #22886 removed `from pandas import Panel` (a left-over from waiting to deprecating `WidePanel`, I believe) from `asv_bench/benchmarks/pandas_vb_common.py` due to linting, and now several benchmarks are failing because they try
```
from .pandas_vb_common import setup, Panel # noqa
```
This fixes the imports, by adding `Panel` to the other pandas-import appropriately.
The larger question is if the ASV code should be part of the CI somehow to catch such errors. There's a way to execute them in minimal form with `asv dev` (corresponding to `asv run --python=same --quick --show-stderr --dry-run`), see https://asv.readthedocs.io/en/stable/writing_benchmarks.html#running-benchmarks-during-development | https://api.github.com/repos/pandas-dev/pandas/pulls/22978 | 2018-10-03T16:05:06Z | 2018-10-04T01:34:35Z | 2018-10-04T01:34:35Z | 2018-10-05T16:21:36Z |
Safer is dtype | diff --git a/.travis.yml b/.travis.yml
index 40baee2c03ea0..c9bdb91283d42 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -53,11 +53,7 @@ matrix:
- dist: trusty
env:
- JOB="3.6, coverage" ENV_FILE="ci/travis-36.yaml" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" COVERAGE=true DOCTEST=true
- # In allow_failures
- - dist: trusty
- env:
- - JOB="3.6, slow" ENV_FILE="ci/travis-36-slow.yaml" SLOW=true
- # In allow_failures
+
- dist: trusty
env:
- JOB="3.7, NumPy dev" ENV_FILE="ci/travis-37-numpydev.yaml" TEST_ARGS="--skip-slow --skip-network -W error" PANDAS_TESTING_MODE="deprecate"
@@ -65,6 +61,12 @@ matrix:
apt:
packages:
- xsel
+
+ # In allow_failures
+ - dist: trusty
+ env:
+ - JOB="3.6, slow" ENV_FILE="ci/travis-36-slow.yaml" SLOW=true
+
# In allow_failures
- dist: trusty
env:
@@ -73,13 +75,6 @@ matrix:
- dist: trusty
env:
- JOB="3.6, slow" ENV_FILE="ci/travis-36-slow.yaml" SLOW=true
- - dist: trusty
- env:
- - JOB="3.7, NumPy dev" ENV_FILE="ci/travis-37-numpydev.yaml" TEST_ARGS="--skip-slow --skip-network -W error" PANDAS_TESTING_MODE="deprecate"
- addons:
- apt:
- packages:
- - xsel
- dist: trusty
env:
- JOB="3.6, doc" ENV_FILE="ci/travis-36-doc.yaml" DOC=true
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index a552251ebbafa..db0a917aefb85 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -2,6 +2,7 @@
import numpy as np
from pandas import compat
+from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass, ABCDataFrame
from pandas.errors import AbstractMethodError
@@ -83,7 +84,12 @@ def is_dtype(cls, dtype):
"""
dtype = getattr(dtype, 'dtype', dtype)
- if isinstance(dtype, np.dtype):
+ if isinstance(dtype, (ABCSeries, ABCIndexClass,
+ ABCDataFrame, np.dtype)):
+ # https://github.com/pandas-dev/pandas/issues/22960
+ # avoid passing data to `construct_from_string`. This could
+ # cause a FutureWarning from numpy about failing elementwise
+ # comparison from, e.g., comparing DataFrame == 'category'.
return False
elif dtype is None:
return False
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ff7590f6d5358..f4b7ccb0fdf5b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4908,7 +4908,8 @@ def _combine_match_index(self, other, func, level=None):
return ops.dispatch_to_series(left, right, func)
else:
# fastpath --> operate directly on values
- new_data = func(left.values.T, right.values).T
+ with np.errstate(all="ignore"):
+ new_data = func(left.values.T, right.values).T
return self._constructor(new_data,
index=left.index, columns=self.columns,
copy=False)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 82198c2b3edd5..fde83237b23d7 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4224,7 +4224,7 @@ def _try_cast(arr, take_fast_path):
try:
# gh-15832: Check if we are requesting a numeric dype and
# that we can convert the data to the requested dtype.
- if is_float_dtype(dtype) or is_integer_dtype(dtype):
+ if is_integer_dtype(dtype):
subarr = maybe_cast_to_integer_array(arr, dtype)
subarr = maybe_cast_to_datetime(arr, dtype)
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index e3d14497a38f9..7e95b076a8a66 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -815,3 +815,23 @@ def test_registry_find(dtype, expected):
('datetime64[ns, US/Eastern]', DatetimeTZDtype('ns', 'US/Eastern'))])
def test_pandas_registry_find(dtype, expected):
assert _pandas_registry.find(dtype) == expected
+
+
+@pytest.mark.parametrize("check", [
+ is_categorical_dtype,
+ is_datetime64tz_dtype,
+ is_period_dtype,
+ is_datetime64_ns_dtype,
+ is_datetime64_dtype,
+ is_interval_dtype,
+ is_datetime64_any_dtype,
+ is_string_dtype,
+ is_bool_dtype,
+])
+def test_is_dtype_no_warning(check):
+ data = pd.DataFrame({"A": [1, 2]})
+ with tm.assert_produces_warning(None):
+ check(data)
+
+ with tm.assert_produces_warning(None):
+ check(data["A"])
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index 97c94e1134cc8..6ed289614b96a 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -1030,3 +1030,9 @@ def test_alignment_non_pandas(self):
align(df, val, 'index')
with pytest.raises(ValueError):
align(df, val, 'columns')
+
+ def test_no_warning(self, all_arithmetic_operators):
+ df = pd.DataFrame({"A": [0., 0.], "B": [0., None]})
+ b = df['B']
+ with tm.assert_produces_warning(None):
+ getattr(df, all_arithmetic_operators)(b, 0)
| Closes https://github.com/pandas-dev/pandas/issues/22960 | https://api.github.com/repos/pandas-dev/pandas/pulls/22975 | 2018-10-03T13:43:12Z | 2018-10-04T11:30:30Z | 2018-10-04T11:30:30Z | 2018-10-04T17:35:46Z |
TST: Fixturize series/test_missing.py | diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index ab3fdd8cbf84f..b3f105ee5cb67 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -21,8 +21,6 @@
import pandas.util.testing as tm
import pandas.util._test_decorators as td
-from .common import TestData
-
try:
import scipy
_is_scipy_ge_0190 = (LooseVersion(scipy.__version__) >=
@@ -52,7 +50,7 @@ def _simple_ts(start, end, freq='D'):
return Series(np.random.randn(len(rng)), index=rng)
-class TestSeriesMissingData(TestData):
+class TestSeriesMissingData():
def test_remove_na_deprecation(self):
# see gh-16971
@@ -489,7 +487,7 @@ def test_isnull_for_inf_deprecated(self):
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
- def test_fillna(self):
+ def test_fillna(self, datetime_series):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method='ffill'))
@@ -506,7 +504,8 @@ def test_fillna(self):
tm.assert_series_equal(ts.fillna(value=5), exp)
pytest.raises(ValueError, ts.fillna)
- pytest.raises(ValueError, self.ts.fillna, value=0, method='ffill')
+ pytest.raises(ValueError, datetime_series.fillna, value=0,
+ method='ffill')
# GH 5703
s1 = Series([np.nan])
@@ -576,9 +575,9 @@ def test_fillna_inplace(self):
expected = x.fillna(value=0)
assert_series_equal(y, expected)
- def test_fillna_invalid_method(self):
+ def test_fillna_invalid_method(self, datetime_series):
try:
- self.ts.fillna(method='ffil')
+ datetime_series.fillna(method='ffil')
except ValueError as inst:
assert 'ffil' in str(inst)
@@ -632,8 +631,8 @@ def test_timedelta64_nan(self):
# def test_logical_range_select(self):
# np.random.seed(12345)
- # selector = -0.5 <= self.ts <= 0.5
- # expected = (self.ts >= -0.5) & (self.ts <= 0.5)
+ # selector = -0.5 <= datetime_series <= 0.5
+ # expected = (datetime_series >= -0.5) & (datetime_series <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
@@ -688,8 +687,8 @@ def test_dropna_intervals(self):
expected = s.iloc[1:]
assert_series_equal(result, expected)
- def test_valid(self):
- ts = self.ts.copy()
+ def test_valid(self, datetime_series):
+ ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.dropna()
@@ -734,12 +733,12 @@ def test_pad_require_monotonicity(self):
pytest.raises(ValueError, rng2.get_indexer, rng, method='pad')
- def test_dropna_preserve_name(self):
- self.ts[:5] = np.nan
- result = self.ts.dropna()
- assert result.name == self.ts.name
- name = self.ts.name
- ts = self.ts.copy()
+ def test_dropna_preserve_name(self, datetime_series):
+ datetime_series[:5] = np.nan
+ result = datetime_series.dropna()
+ assert result.name == datetime_series.name
+ name = datetime_series.name
+ ts = datetime_series.copy()
ts.dropna(inplace=True)
assert ts.name == name
@@ -825,10 +824,11 @@ def test_series_pad_backfill_limit(self):
assert_series_equal(result, expected)
-class TestSeriesInterpolateData(TestData):
+class TestSeriesInterpolateData():
- def test_interpolate(self):
- ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
+ def test_interpolate(self, datetime_series, string_series):
+ ts = Series(np.arange(len(datetime_series), dtype=float),
+ datetime_series.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
@@ -836,8 +836,8 @@ def test_interpolate(self):
linear_interp = ts_copy.interpolate(method='linear')
tm.assert_series_equal(linear_interp, ts)
- ord_ts = Series([d.toordinal() for d in self.ts.index],
- index=self.ts.index).astype(float)
+ ord_ts = Series([d.toordinal() for d in datetime_series.index],
+ index=datetime_series.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
@@ -847,7 +847,7 @@ def test_interpolate(self):
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
- non_ts = self.series.copy()
+ non_ts = string_series.copy()
non_ts[0] = np.NaN
pytest.raises(ValueError, non_ts.interpolate, method='time')
| This is in reference to issue #22550 | https://api.github.com/repos/pandas-dev/pandas/pulls/22973 | 2018-10-03T09:36:05Z | 2018-10-04T11:20:47Z | 2018-10-04T11:20:47Z | 2018-10-04T11:20:51Z |
TST: Fixturize series/test_io.py | diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py
index cbf9bff06ad34..50f548b855247 100644
--- a/pandas/tests/series/test_io.py
+++ b/pandas/tests/series/test_io.py
@@ -16,10 +16,8 @@
assert_frame_equal, ensure_clean)
import pandas.util.testing as tm
-from .common import TestData
-
-class TestSeriesToCSV(TestData):
+class TestSeriesToCSV():
def read_csv(self, path, **kwargs):
params = dict(squeeze=True, index_col=0,
@@ -34,10 +32,10 @@ def read_csv(self, path, **kwargs):
return out
- def test_from_csv_deprecation(self):
+ def test_from_csv_deprecation(self, datetime_series):
# see gh-17812
with ensure_clean() as path:
- self.ts.to_csv(path, header=False)
+ datetime_series.to_csv(path, header=False)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
@@ -46,7 +44,7 @@ def test_from_csv_deprecation(self):
assert_series_equal(depr_ts, ts)
@pytest.mark.parametrize("arg", ["path", "header", "both"])
- def test_to_csv_deprecation(self, arg):
+ def test_to_csv_deprecation(self, arg, datetime_series):
# see gh-19715
with ensure_clean() as path:
if arg == "path":
@@ -57,18 +55,18 @@ def test_to_csv_deprecation(self, arg):
kwargs = dict(path=path)
with tm.assert_produces_warning(FutureWarning):
- self.ts.to_csv(**kwargs)
+ datetime_series.to_csv(**kwargs)
# Make sure roundtrip still works.
ts = self.read_csv(path)
- assert_series_equal(self.ts, ts, check_names=False)
+ assert_series_equal(datetime_series, ts, check_names=False)
- def test_from_csv(self):
+ def test_from_csv(self, datetime_series, string_series):
with ensure_clean() as path:
- self.ts.to_csv(path, header=False)
+ datetime_series.to_csv(path, header=False)
ts = self.read_csv(path)
- assert_series_equal(self.ts, ts, check_names=False)
+ assert_series_equal(datetime_series, ts, check_names=False)
assert ts.name is None
assert ts.index.name is None
@@ -79,18 +77,18 @@ def test_from_csv(self):
assert_series_equal(depr_ts, ts)
# see gh-10483
- self.ts.to_csv(path, header=True)
+ datetime_series.to_csv(path, header=True)
ts_h = self.read_csv(path, header=0)
assert ts_h.name == "ts"
- self.series.to_csv(path, header=False)
+ string_series.to_csv(path, header=False)
series = self.read_csv(path)
- assert_series_equal(self.series, series, check_names=False)
+ assert_series_equal(string_series, series, check_names=False)
assert series.name is None
assert series.index.name is None
- self.series.to_csv(path, header=True)
+ string_series.to_csv(path, header=True)
series_h = self.read_csv(path, header=0)
assert series_h.name == "series"
@@ -106,19 +104,19 @@ def test_from_csv(self):
check_series = Series({"1998-01-01": 1.0, "1999-01-01": 2.0})
assert_series_equal(check_series, series)
- def test_to_csv(self):
+ def test_to_csv(self, datetime_series):
import io
with ensure_clean() as path:
- self.ts.to_csv(path, header=False)
+ datetime_series.to_csv(path, header=False)
with io.open(path, newline=None) as f:
lines = f.readlines()
assert (lines[1] != '\n')
- self.ts.to_csv(path, index=False, header=False)
+ datetime_series.to_csv(path, index=False, header=False)
arr = np.loadtxt(path)
- assert_almost_equal(arr, self.ts.values)
+ assert_almost_equal(arr, datetime_series.values)
def test_to_csv_unicode_index(self):
buf = StringIO()
@@ -196,22 +194,23 @@ def test_to_csv_compression(self, s, encoding, compression):
encoding=encoding))
-class TestSeriesIO(TestData):
+class TestSeriesIO():
- def test_to_frame(self):
- self.ts.name = None
- rs = self.ts.to_frame()
- xp = pd.DataFrame(self.ts.values, index=self.ts.index)
+ def test_to_frame(self, datetime_series):
+ datetime_series.name = None
+ rs = datetime_series.to_frame()
+ xp = pd.DataFrame(datetime_series.values, index=datetime_series.index)
assert_frame_equal(rs, xp)
- self.ts.name = 'testname'
- rs = self.ts.to_frame()
- xp = pd.DataFrame(dict(testname=self.ts.values), index=self.ts.index)
+ datetime_series.name = 'testname'
+ rs = datetime_series.to_frame()
+ xp = pd.DataFrame(dict(testname=datetime_series.values),
+ index=datetime_series.index)
assert_frame_equal(rs, xp)
- rs = self.ts.to_frame(name='testdifferent')
- xp = pd.DataFrame(
- dict(testdifferent=self.ts.values), index=self.ts.index)
+ rs = datetime_series.to_frame(name='testdifferent')
+ xp = pd.DataFrame(dict(testdifferent=datetime_series.values),
+ index=datetime_series.index)
assert_frame_equal(rs, xp)
def test_timeseries_periodindex(self):
@@ -256,11 +255,12 @@ class SubclassedFrame(DataFrame):
dict,
collections.defaultdict(list),
collections.OrderedDict))
- def test_to_dict(self, mapping):
+ def test_to_dict(self, mapping, datetime_series):
# GH16122
- ts = TestData().ts
tm.assert_series_equal(
- Series(ts.to_dict(mapping), name='ts'), ts)
- from_method = Series(ts.to_dict(collections.Counter))
- from_constructor = Series(collections.Counter(ts.iteritems()))
+ Series(datetime_series.to_dict(mapping), name='ts'),
+ datetime_series)
+ from_method = Series(datetime_series.to_dict(collections.Counter))
+ from_constructor = Series(collections
+ .Counter(datetime_series.iteritems()))
tm.assert_series_equal(from_method, from_constructor)
| This is in reference to issue #22550 | https://api.github.com/repos/pandas-dev/pandas/pulls/22972 | 2018-10-03T09:23:48Z | 2018-10-04T11:21:46Z | 2018-10-04T11:21:46Z | 2018-10-04T11:21:49Z |
DOC: fixed doc-string for combine & combine_first in pandas/core/series.py | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 892b24f6ee552..b20bcad2aa09f 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2281,37 +2281,71 @@ def _binop(self, other, func, level=None, fill_value=None):
def combine(self, other, func, fill_value=None):
"""
- Perform elementwise binary operation on two Series using given function
- with optional fill value when an index is missing from one Series or
- the other
+ Combine the Series with a Series or scalar according to `func`.
+
+ Combine the Series and `other` using `func` to perform elementwise
+ selection for combined Series.
+ `fill_value` is assumed when value is missing at some index
+ from one of the two objects being combined.
Parameters
----------
- other : Series or scalar value
+ other : Series or scalar
+ The value(s) to be combined with the `Series`.
func : function
- Function that takes two scalars as inputs and return a scalar
- fill_value : scalar value
- The default specifies to use the appropriate NaN value for
- the underlying dtype of the Series
+ Function that takes two scalars as inputs and returns an element.
+ fill_value : scalar, optional
+ The value to assume when an index is missing from
+ one Series or the other. The default specifies to use the
+ appropriate NaN value for the underlying dtype of the Series.
Returns
-------
- result : Series
-
- Examples
- --------
- >>> s1 = pd.Series([1, 2])
- >>> s2 = pd.Series([0, 3])
- >>> s1.combine(s2, lambda x1, x2: x1 if x1 < x2 else x2)
- 0 0
- 1 2
- dtype: int64
+ Series
+ The result of combining the Series with the other object.
See Also
--------
Series.combine_first : Combine Series values, choosing the calling
- Series's values first.
- """
+ Series' values first.
+
+ Examples
+ --------
+ Consider 2 Datasets ``s1`` and ``s2`` containing
+ highest clocked speeds of different birds.
+
+ >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0})
+ >>> s1
+ falcon 330.0
+ eagle 160.0
+ dtype: float64
+ >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})
+ >>> s2
+ falcon 345.0
+ eagle 200.0
+ duck 30.0
+ dtype: float64
+
+ Now, to combine the two datasets and view the highest speeds
+ of the birds across the two datasets
+
+ >>> s1.combine(s2, max)
+ duck NaN
+ eagle 200.0
+ falcon 345.0
+ dtype: float64
+
+ In the previous example, the resulting value for duck is missing,
+ because the maximum of a NaN and a float is a NaN.
+ So, in the example, we set ``fill_value=0``,
+ so the maximum value returned will be the value from some dataset.
+
+ >>> s1.combine(s2, max, fill_value=0)
+ duck 30.0
+ eagle 200.0
+ falcon 345.0
+ dtype: float64
+"""
if fill_value is None:
fill_value = na_value_for_dtype(self.dtype, compat=False)
@@ -2352,16 +2386,26 @@ def combine(self, other, func, fill_value=None):
def combine_first(self, other):
"""
- Combine Series values, choosing the calling Series's values
- first. Result index will be the union of the two indexes
+ Combine Series values, choosing the calling Series's values first.
Parameters
----------
other : Series
+ The value(s) to be combined with the `Series`.
Returns
-------
- combined : Series
+ Series
+ The result of combining the Series with the other object.
+
+ See Also
+ --------
+ Series.combine : Perform elementwise operation on two Series
+ using a given function.
+
+ Notes
+ -----
+ Result index will be the union of the two indexes.
Examples
--------
@@ -2371,11 +2415,6 @@ def combine_first(self, other):
0 1.0
1 4.0
dtype: float64
-
- See Also
- --------
- Series.combine : Perform elementwise operation on two Series
- using a given function.
"""
new_index = self.index.union(other.index)
this = self.reindex(new_index, copy=False)
| - [x] closes #22953
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22971 | 2018-10-03T09:21:50Z | 2018-11-21T12:48:25Z | 2018-11-21T12:48:25Z | 2019-03-27T12:35:19Z |
Example fill value | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 3e1711edb0f27..9203e98736924 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -238,6 +238,40 @@ For situations where you need an ``ndarray`` of ``Interval`` objects, use
np.asarray(idx)
idx.values.astype(object)
+
+.. _whatsnew_0240.api.types.is_scalar:
+
+Support for PEP 3141 numbers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The `is_scalar` function now returns True when a `Number` or `Fraction` is passed.
+
+Previous Behavior:
+
+.. code-block:: ipython
+
+ In [1]: pandas.api.types.is_scalar(fractions.Fraction(1))
+ Out[1]:
+ False
+
+ In [2]: pandas.api.types.is_scalar(numbers.Number(1))
+ Out[2]:
+ False
+
+New Behavior:
+
+.. code-block:: ipython
+
+ In [1]: pandas.api.types.is_scalar(fractions.Fraction(1))
+ Out[1]:
+ True
+
+ In [2]: pandas.api.types.is_scalar(numbers.Number(1))
+ Out[2]:
+ True
+
+This mirrors ``numpy.isscalar``, which already supports PEP 3141 and is a requirement for `pandas`.
+
.. _whatsnew_0240.api.timezone_offset_parsing:
Parsing Datetime Strings with Timezone Offsets
@@ -829,4 +863,5 @@ Other
- :meth:`DataFrame.nlargest` and :meth:`DataFrame.nsmallest` now returns the correct n values when keep != 'all' also when tied on the first columns (:issue:`22752`)
- :meth:`~pandas.io.formats.style.Styler.bar` now also supports tablewise application (in addition to rowwise and columnwise) with ``axis=None`` and setting clipping range with ``vmin`` and ``vmax`` (:issue:`21548` and :issue:`21526`). ``NaN`` values are also handled properly.
- Logical operations ``&, |, ^`` between :class:`Series` and :class:`Index` will no longer raise ``ValueError`` (:issue:`22092`)
--
+- Support PEP 3141 numbers in `pandas.api.types.is_scalar` function
+-
\ No newline at end of file
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 0b9793a6ef97a..1b84d09ae6a85 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -119,7 +119,8 @@ def is_scalar(val: object) -> bint:
- instances of decimal.Decimal
- Interval
- DateOffset
-
+ - Fraction
+ - Number
"""
return (cnp.PyArray_IsAnyScalar(val)
@@ -134,8 +135,8 @@ def is_scalar(val: object) -> bint:
or util.is_period_object(val)
or is_decimal(val)
or is_interval(val)
- or util.is_offset_object(val))
-
+ or util.is_offset_object(val)
+ or np.isscalar(val))
def item_from_zerodim(val: object) -> object:
"""
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 83f80c305c5eb..9bc9777c59d8f 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2266,30 +2266,41 @@ def _binop(self, other, func, level=None, fill_value=None):
def combine(self, other, func, fill_value=None):
"""
+ Combine the Series with a `Series` or `Scalar` according to `func`.
+
Perform elementwise binary operation on two Series using given function
with optional fill value when an index is missing from one Series or
- the other
+ the other.
Parameters
----------
other : Series or scalar value
+ The value(s) to be combined with the `Series`.
func : function
- Function that takes two scalars as inputs and return a scalar
+ Function that takes two scalars as inputs and return a scalar.
fill_value : scalar value
+ The optional value to assume when an index
+ is missing from one Series or the other,
The default specifies to use the appropriate NaN value for
- the underlying dtype of the Series
+ the underlying dtype of the Series.
Returns
-------
- result : Series
+ result : the combined `Series` object
Examples
--------
>>> s1 = pd.Series([1, 2])
- >>> s2 = pd.Series([0, 3])
+ >>> s2 = pd.Series([0, 3, 4])
>>> s1.combine(s2, lambda x1, x2: x1 if x1 < x2 else x2)
0 0
1 2
+ 2 4
+ dtype: int64
+ >>> s1.combine(s2, lambda x1, x2: x1 if x1 > x2 else x2,fill_value=787)
+ 0 1
+ 1 3
+ 2 787
dtype: int64
See Also
@@ -2333,12 +2344,16 @@ def combine(self, other, func, fill_value=None):
def combine_first(self, other):
"""
- Combine Series values, choosing the calling Series's values
- first. Result index will be the union of the two indexes
+ Combine Series values, choosing the calling Series's values first.
+
+ Notes
+ -----
+ Result index will be the union of the two indexes.
Parameters
----------
other : Series
+ The value(s) to be combined with the `Series`.
Returns
-------
| - [x] closes #22953
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22970 | 2018-10-03T09:11:54Z | 2018-10-03T09:12:49Z | null | 2018-10-03T09:13:34Z |
DOC: Fixed the doctsring for _set_axis_name (GH 22895) | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index dde671993a56b..97ea4fb96ce95 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1109,16 +1109,15 @@ def rename(self, *args, **kwargs):
('inplace', False)])
def rename_axis(self, mapper=None, **kwargs):
"""
- Alter the name of the index or name of Index object that is the
- columns.
+ Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
- dict-like or functions transformations to apply to
- that axis' values.
+ A scalar, list-like, dict-like or functions transformations to
+ apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
@@ -1126,18 +1125,25 @@ def rename_axis(self, mapper=None, **kwargs):
.. versionchanged:: 0.24.0
- axis : int or string, default 0
- copy : boolean, default True
+ axis : {0 or 'index', 1 or 'columns'}, default 0
+ The axis to rename.
+ copy : bool, default True
Also copy underlying data.
- inplace : boolean, default False
+ inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
- renamed : Series, DataFrame, or None
+ Series, DataFrame, or None
The same type as the caller or None if `inplace` is True.
+ See Also
+ --------
+ Series.rename : Alter Series index labels or name.
+ DataFrame.rename : Alter DataFrame index labels or name.
+ Index.rename : Set new names on index.
+
Notes
-----
Prior to version 0.21.0, ``rename_axis`` could also be used to change
@@ -1162,75 +1168,73 @@ def rename_axis(self, mapper=None, **kwargs):
We *highly* recommend using keyword arguments to clarify your
intent.
- See Also
- --------
- Series.rename : Alter Series index labels or name.
- DataFrame.rename : Alter DataFrame index labels or name.
- Index.rename : Set new names on index.
-
Examples
--------
**Series**
- >>> s = pd.Series([1, 2, 3])
- >>> s.rename_axis("foo")
- foo
- 0 1
- 1 2
- 2 3
- dtype: int64
+ >>> s = pd.Series(["dog", "cat", "monkey"])
+ >>> s
+ 0 dog
+ 1 cat
+ 2 monkey
+ dtype: object
+ >>> s.rename_axis("animal")
+ animal
+ 0 dog
+ 1 cat
+ 2 monkey
+ dtype: object
**DataFrame**
- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
- >>> df.rename_axis("foo")
- A B
- foo
- 0 1 4
- 1 2 5
- 2 3 6
-
- >>> df.rename_axis("bar", axis="columns")
- bar A B
- 0 1 4
- 1 2 5
- 2 3 6
-
- >>> mi = pd.MultiIndex.from_product([['a', 'b', 'c'], [1, 2]],
- ... names=['let','num'])
- >>> df = pd.DataFrame({'x': [i for i in range(len(mi))],
- ... 'y' : [i*10 for i in range(len(mi))]},
- ... index=mi)
- >>> df.rename_axis(index={'num' : 'n'})
- x y
- let n
- a 1 0 0
- 2 1 10
- b 1 2 20
- 2 3 30
- c 1 4 40
- 2 5 50
-
- >>> cdf = df.rename_axis(columns='col')
- >>> cdf
- col x y
- let num
- a 1 0 0
- 2 1 10
- b 1 2 20
- 2 3 30
- c 1 4 40
- 2 5 50
-
- >>> cdf.rename_axis(columns=str.upper)
- COL x y
- let num
- a 1 0 0
- 2 1 10
- b 1 2 20
- 2 3 30
- c 1 4 40
- 2 5 50
+ >>> df = pd.DataFrame({"num_legs": [4, 4, 2],
+ ... "num_arms": [0, 0, 2]},
+ ... ["dog", "cat", "monkey"])
+ >>> df
+ num_legs num_arms
+ dog 4 0
+ cat 4 0
+ monkey 2 2
+ >>> df = df.rename_axis("animal")
+ >>> df
+ num_legs num_arms
+ animal
+ dog 4 0
+ cat 4 0
+ monkey 2 2
+ >>> df = df.rename_axis("limbs", axis="columns")
+ >>> df
+ limbs num_legs num_arms
+ animal
+ dog 4 0
+ cat 4 0
+ monkey 2 2
+
+ **MultiIndex**
+
+ >>> df.index = pd.MultiIndex.from_product([['mammal'],
+ ... ['dog', 'cat', 'monkey']],
+ ... names=['type', 'name'])
+ >>> df
+ limbs num_legs num_arms
+ type name
+ mammal dog 4 0
+ cat 4 0
+ monkey 2 2
+
+ >>> df.rename_axis(index={'type': 'class'})
+ limbs num_legs num_arms
+ class name
+ mammal dog 4 0
+ cat 4 0
+ monkey 2 2
+
+ >>> df.rename_axis(columns=str.upper)
+ LIMBS num_legs num_arms
+ type name
+ mammal dog 4 0
+ cat 4 0
+ monkey 2 2
"""
axes, kwargs = self._construct_axes_from_arguments((), kwargs)
copy = kwargs.pop('copy', True)
@@ -1285,45 +1289,57 @@ def rename_axis(self, mapper=None, **kwargs):
def _set_axis_name(self, name, axis=0, inplace=False):
"""
- Alter the name or names of the axis.
+ Set the name(s) of the axis.
Parameters
----------
name : str or list of str
- Name for the Index, or list of names for the MultiIndex
- axis : int or str
- 0 or 'index' for the index; 1 or 'columns' for the columns
- inplace : bool
- whether to modify `self` directly or return a copy
+ Name(s) to set.
+ axis : {0 or 'index', 1 or 'columns'}, default 0
+ The axis to set the label. The value 0 or 'index' specifies index,
+ and the value 1 or 'columns' specifies columns.
+ inplace : bool, default False
+ If `True`, do operation inplace and return None.
.. versionadded:: 0.21.0
Returns
-------
- renamed : same type as caller or None if inplace=True
+ Series, DataFrame, or None
+ The same type as the caller or `None` if `inplace` is `True`.
See Also
--------
- pandas.DataFrame.rename
- pandas.Series.rename
- pandas.Index.rename
+ DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
+ Series.rename : Alter the index labels or set the index name
+ of :class:`Series`.
+ Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.
Examples
--------
- >>> df._set_axis_name("foo")
- A
- foo
- 0 1
- 1 2
- 2 3
- >>> df.index = pd.MultiIndex.from_product([['A'], ['a', 'b', 'c']])
- >>> df._set_axis_name(["bar", "baz"])
- A
- bar baz
- A a 1
- b 2
- c 3
- """
+ >>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
+ ... ["dog", "cat", "monkey"])
+ >>> df
+ num_legs
+ dog 4
+ cat 4
+ monkey 2
+ >>> df._set_axis_name("animal")
+ num_legs
+ animal
+ dog 4
+ cat 4
+ monkey 2
+ >>> df.index = pd.MultiIndex.from_product(
+ ... [["mammal"], ['dog', 'cat', 'monkey']])
+ >>> df._set_axis_name(["type", "name"])
+ legs
+ type name
+ mammal dog 4
+ cat 4
+ monkey 2
+ """
+ pd.MultiIndex.from_product([["mammal"], ['dog', 'cat', 'monkey']])
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
| Fixed the docstring of the function `_set_axis_name` in `pandas/core/generic.py`
Closes #22895 | https://api.github.com/repos/pandas-dev/pandas/pulls/22969 | 2018-10-03T09:08:31Z | 2018-11-20T02:21:07Z | 2018-11-20T02:21:07Z | 2018-11-26T15:36:35Z |
DOC: Fixed the doctsring for _set_axis_name (GH 22895) | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 393e7caae5fab..03e24039f16fe 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1207,31 +1207,34 @@ def rename_axis(self, mapper, axis=0, copy=True, inplace=False):
def _set_axis_name(self, name, axis=0, inplace=False):
"""
- Alter the name or names of the axis.
+ Alter the label(s) of the axis.
Parameters
----------
name : str or list of str
- Name for the Index, or list of names for the MultiIndex
- axis : int or str
- 0 or 'index' for the index; 1 or 'columns' for the columns
- inplace : bool
- whether to modify `self` directly or return a copy
+ Labels(s) to set.
+ axis : int or str, default 0
+ The axis to set the label. The value 0 or 'index' specifies index,
+ and the value 1 or 'columns' specifies columns.
+ inplace : bool, default False
+ Whether to modify `self` directly or return a copy.
.. versionadded:: 0.21.0
Returns
-------
- renamed : same type as caller or None if inplace=True
+ Series, DataFrame, Panel, or None
+ The same type as the caller or `None` if `inplace` is `True`.
See Also
--------
- pandas.DataFrame.rename
- pandas.Series.rename
- pandas.Index.rename
+ pandas.DataFrame.rename : Alter the labels of the axis of an object of :class:`DataFrame`.
+ pandas.Series.rename : Alter the name or the labels of an object of :class:`Series`.
+ pandas.Index.rename : Alter the name of an object of :class:`Index` or :class:`MultiIndex`.
Examples
--------
+ >>> df = pd.DataFrame({"A": [1, 2, 3]})
>>> df._set_axis_name("foo")
A
foo
| Fixed the docstring of the function `_set_axis_name` in `pandas/core/generic.py` (#22895) | https://api.github.com/repos/pandas-dev/pandas/pulls/22968 | 2018-10-03T09:01:54Z | 2018-10-03T09:03:12Z | null | 2018-10-03T09:03:12Z |
TST: Fixturize series/test_dtypes.py | diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index 125dff9ecfa7c..63ead2dc7d245 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -24,10 +24,8 @@
from pandas import compat
import pandas.util.testing as tm
-from .common import TestData
-
-class TestSeriesDtypes(TestData):
+class TestSeriesDtypes():
def test_dt64_series_astype_object(self):
dt64ser = Series(date_range('20130101', periods=3))
@@ -56,17 +54,17 @@ def test_asobject_deprecated(self):
o = s.asobject
assert isinstance(o, np.ndarray)
- def test_dtype(self):
+ def test_dtype(self, datetime_series):
- assert self.ts.dtype == np.dtype('float64')
- assert self.ts.dtypes == np.dtype('float64')
- assert self.ts.ftype == 'float64:dense'
- assert self.ts.ftypes == 'float64:dense'
- tm.assert_series_equal(self.ts.get_dtype_counts(),
+ assert datetime_series.dtype == np.dtype('float64')
+ assert datetime_series.dtypes == np.dtype('float64')
+ assert datetime_series.ftype == 'float64:dense'
+ assert datetime_series.ftypes == 'float64:dense'
+ tm.assert_series_equal(datetime_series.get_dtype_counts(),
Series(1, ['float64']))
# GH18243 - Assert .get_ftype_counts is deprecated
with tm.assert_produces_warning(FutureWarning):
- tm.assert_series_equal(self.ts.get_ftype_counts(),
+ tm.assert_series_equal(datetime_series.get_ftype_counts(),
Series(1, ['float64:dense']))
@pytest.mark.parametrize("value", [np.nan, np.inf])
| This is in reference to issue #22550 | https://api.github.com/repos/pandas-dev/pandas/pulls/22967 | 2018-10-03T08:59:38Z | 2018-10-04T11:22:22Z | 2018-10-04T11:22:22Z | 2018-10-04T11:23:01Z |
TST: Fixturize series/test_datetime_values.py | diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py
index fee2323310b9c..e06d3a67db662 100644
--- a/pandas/tests/series/test_datetime_values.py
+++ b/pandas/tests/series/test_datetime_values.py
@@ -23,10 +23,8 @@
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
-from .common import TestData
-
-class TestSeriesDatetimeValues(TestData):
+class TestSeriesDatetimeValues():
def test_dt_namespace_accessor(self):
| This is in reference to issue #22550 | https://api.github.com/repos/pandas-dev/pandas/pulls/22966 | 2018-10-03T08:55:49Z | 2018-10-04T11:23:21Z | 2018-10-04T11:23:21Z | 2018-10-04T11:23:21Z |
TST: Fixturize series/test_constructors.py | diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 4817f5bdccc29..57a3f54fadbcc 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -26,10 +26,8 @@
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
-from .common import TestData
-
-class TestSeriesConstructors(TestData):
+class TestSeriesConstructors():
def test_invalid_dtype(self):
# GH15520
@@ -50,23 +48,23 @@ def test_scalar_conversion(self):
assert int(Series([1.])) == 1
assert long(Series([1.])) == 1
- def test_constructor(self):
- assert self.ts.index.is_all_dates
+ def test_constructor(self, datetime_series, empty_series):
+ assert datetime_series.index.is_all_dates
# Pass in Series
- derived = Series(self.ts)
+ derived = Series(datetime_series)
assert derived.index.is_all_dates
- assert tm.equalContents(derived.index, self.ts.index)
+ assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
- assert id(self.ts.index) == id(derived.index)
+ assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
- assert not self.empty.index.is_all_dates
+ assert not empty_series.index.is_all_dates
assert not Series({}).index.is_all_dates
pytest.raises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
@@ -977,27 +975,27 @@ def test_fromDict(self):
series = Series(data, dtype=float)
assert series.dtype == np.float64
- def test_fromValue(self):
+ def test_fromValue(self, datetime_series):
- nans = Series(np.NaN, index=self.ts.index)
+ nans = Series(np.NaN, index=datetime_series.index)
assert nans.dtype == np.float_
- assert len(nans) == len(self.ts)
+ assert len(nans) == len(datetime_series)
- strings = Series('foo', index=self.ts.index)
+ strings = Series('foo', index=datetime_series.index)
assert strings.dtype == np.object_
- assert len(strings) == len(self.ts)
+ assert len(strings) == len(datetime_series)
d = datetime.now()
- dates = Series(d, index=self.ts.index)
+ dates = Series(d, index=datetime_series.index)
assert dates.dtype == 'M8[ns]'
- assert len(dates) == len(self.ts)
+ assert len(dates) == len(datetime_series)
# GH12336
# Test construction of categorical series from value
- categorical = Series(0, index=self.ts.index, dtype="category")
- expected = Series(0, index=self.ts.index).astype("category")
+ categorical = Series(0, index=datetime_series.index, dtype="category")
+ expected = Series(0, index=datetime_series.index).astype("category")
assert categorical.dtype == 'category'
- assert len(categorical) == len(self.ts)
+ assert len(categorical) == len(datetime_series)
tm.assert_series_equal(categorical, expected)
def test_constructor_dtype_timedelta64(self):
| This is in reference to issue #22550 | https://api.github.com/repos/pandas-dev/pandas/pulls/22965 | 2018-10-03T08:46:26Z | 2018-10-04T11:23:40Z | 2018-10-04T11:23:40Z | 2018-10-04T13:27:08Z |
TST: Fixturize series/test_combine_concat.py | diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index 35ba4fbf0ce25..8b021ab81ff81 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -15,29 +15,28 @@
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
-from .common import TestData
+class TestSeriesCombine():
-class TestSeriesCombine(TestData):
-
- def test_append(self):
- appendedSeries = self.series.append(self.objSeries)
+ def test_append(self, datetime_series, string_series, object_series):
+ appendedSeries = string_series.append(object_series)
for idx, value in compat.iteritems(appendedSeries):
- if idx in self.series.index:
- assert value == self.series[idx]
- elif idx in self.objSeries.index:
- assert value == self.objSeries[idx]
+ if idx in string_series.index:
+ assert value == string_series[idx]
+ elif idx in object_series.index:
+ assert value == object_series[idx]
else:
raise AssertionError("orphaned index!")
- pytest.raises(ValueError, self.ts.append, self.ts,
+ pytest.raises(ValueError, datetime_series.append, datetime_series,
verify_integrity=True)
- def test_append_many(self):
- pieces = [self.ts[:5], self.ts[5:10], self.ts[10:]]
+ def test_append_many(self, datetime_series):
+ pieces = [datetime_series[:5], datetime_series[5:10],
+ datetime_series[10:]]
result = pieces[0].append(pieces[1:])
- assert_series_equal(result, self.ts)
+ assert_series_equal(result, datetime_series)
def test_append_duplicates(self):
# GH 13677
| This is in reference to issue #22550 | https://api.github.com/repos/pandas-dev/pandas/pulls/22964 | 2018-10-03T08:38:21Z | 2018-10-04T11:24:06Z | 2018-10-04T11:24:06Z | 2018-10-04T13:26:53Z |
BUG GH22858 When creating empty dataframe, only cast int to float if index given | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 9b71ab656920d..606f29ef75ba7 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -199,6 +199,7 @@ Other Enhancements
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+- A newly constructed empty :class:`DataFrame` with integer as the ``dtype`` will now only be cast to ``float64`` if ``index`` is specified (:issue:`22858`)
.. _whatsnew_0240.api_breaking.interval_values:
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 410e061c895db..a95a45d5f9ae4 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1220,7 +1220,9 @@ def construct_1d_arraylike_from_scalar(value, length, dtype):
dtype = dtype.dtype
# coerce if we have nan for an integer dtype
- if is_integer_dtype(dtype) and isna(value):
+ # GH 22858: only cast to float if an index
+ # (passed here as length) is specified
+ if length and is_integer_dtype(dtype) and isna(value):
dtype = np.float64
subarr = np.empty(length, dtype=dtype)
subarr.fill(value)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 2f1c9e05a01b0..e2be410d51b88 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -798,25 +798,20 @@ def test_constructor_mrecarray(self):
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
- def test_constructor_corner(self):
+ def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
- # empty but with specified dtype
- df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=object)
- assert df.values.dtype == np.object_
-
- # does not error but ends up float
- df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=int)
- assert df.values.dtype == np.dtype('float64')
-
- # #1783 empty dtype object
- df = DataFrame({}, columns=['foo', 'bar'])
- assert df.values.dtype == np.object_
-
- df = DataFrame({'b': 1}, index=lrange(10), columns=list('abc'),
- dtype=int)
- assert df.values.dtype == np.dtype('float64')
+ @pytest.mark.parametrize("data, index, columns, dtype, expected", [
+ (None, lrange(10), ['a', 'b'], object, np.object_),
+ (None, None, ['a', 'b'], 'int64', np.dtype('int64')),
+ (None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
+ ({}, None, ['foo', 'bar'], None, np.object_),
+ ({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
+ ])
+ def test_constructor_dtype(self, data, index, columns, dtype, expected):
+ df = DataFrame(data, index, columns, dtype)
+ assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
| - [X] closes #22858
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Previously, when creating a dataframe with no data of dtype int, the dtype would be changed to float. This is necessary when a predefined number of rows is included as the index parameter, so that they can be filled with nan. However, when no index is passed, this cast is unexpected. This PR changes it so dtype is only altered when necessary. | https://api.github.com/repos/pandas-dev/pandas/pulls/22963 | 2018-10-03T05:22:58Z | 2018-10-04T23:13:58Z | 2018-10-04T23:13:58Z | 2018-10-05T04:34:22Z |
CLN: Move some PI/DTI methods to EA subclasses, implement tests | diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index a0a9b57712249..7daaa8de1734f 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -31,7 +31,7 @@
from pandas.core.algorithms import checked_add_with_arr
from pandas.core import ops
-from pandas.tseries.frequencies import to_offset
+from pandas.tseries.frequencies import to_offset, get_period_alias
from pandas.tseries.offsets import Tick, generate_range
from pandas.core.arrays import datetimelike as dtl
@@ -200,6 +200,10 @@ def __new__(cls, values, freq=None, tz=None, dtype=None):
# e.g. DatetimeIndex
tz = values.tz
+ if freq is None and hasattr(values, "freq"):
+ # i.e. DatetimeArray, DatetimeIndex
+ freq = values.freq
+
freq, freq_infer = dtl.maybe_infer_freq(freq)
# if dtype has an embedded tz, capture it
@@ -764,6 +768,67 @@ def normalize(self):
new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz)
return type(self)(new_values, freq='infer').tz_localize(self.tz)
+ def to_period(self, freq=None):
+ """
+ Cast to PeriodArray/Index at a particular frequency.
+
+ Converts DatetimeArray/Index to PeriodArray/Index.
+
+ Parameters
+ ----------
+ freq : string or Offset, optional
+ One of pandas' :ref:`offset strings <timeseries.offset_aliases>`
+ or an Offset object. Will be inferred by default.
+
+ Returns
+ -------
+ PeriodArray/Index
+
+ Raises
+ ------
+ ValueError
+ When converting a DatetimeArray/Index with non-regular values,
+ so that a frequency cannot be inferred.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame({"y": [1,2,3]},
+ ... index=pd.to_datetime(["2000-03-31 00:00:00",
+ ... "2000-05-31 00:00:00",
+ ... "2000-08-31 00:00:00"]))
+ >>> df.index.to_period("M")
+ PeriodIndex(['2000-03', '2000-05', '2000-08'],
+ dtype='period[M]', freq='M')
+
+ Infer the daily frequency
+
+ >>> idx = pd.date_range("2017-01-01", periods=2)
+ >>> idx.to_period()
+ PeriodIndex(['2017-01-01', '2017-01-02'],
+ dtype='period[D]', freq='D')
+
+ See also
+ --------
+ pandas.PeriodIndex: Immutable ndarray holding ordinal values
+ pandas.DatetimeIndex.to_pydatetime: Return DatetimeIndex as object
+ """
+ from pandas.core.arrays.period import PeriodArrayMixin
+
+ if self.tz is not None:
+ warnings.warn("Converting to PeriodArray/Index representation "
+ "will drop timezone information.", UserWarning)
+
+ if freq is None:
+ freq = self.freqstr or self.inferred_freq
+
+ if freq is None:
+ raise ValueError("You must pass a freq argument as "
+ "current index has none.")
+
+ freq = get_period_alias(freq)
+
+ return PeriodArrayMixin(self.values, freq=freq)
+
# -----------------------------------------------------------------
# Properties - Vectorized Timestamp Properties/Methods
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 41b4c5c669efc..9e877de1a3c0a 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -10,14 +10,15 @@
Period, IncompatibleFrequency, DIFFERENT_FREQ_INDEX,
get_period_field_arr, period_asfreq_arr)
from pandas._libs.tslibs import period as libperiod
-from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds
+from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds, Timedelta
from pandas._libs.tslibs.fields import isleapyear_arr
from pandas import compat
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
- is_integer_dtype, is_float_dtype, is_period_dtype)
+ is_integer_dtype, is_float_dtype, is_period_dtype,
+ is_datetime64_dtype)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.generic import ABCSeries
@@ -127,6 +128,10 @@ def __new__(cls, values, freq=None, **kwargs):
freq = values.freq
values = values.asi8
+ elif is_datetime64_dtype(values):
+ # TODO: what if it has tz?
+ values = dt64arr_to_periodarr(values, freq)
+
return cls._simple_new(values, freq, **kwargs)
@classmethod
@@ -207,6 +212,14 @@ def is_leap_year(self):
""" Logical indicating if the date belongs to a leap year """
return isleapyear_arr(np.asarray(self.year))
+ @property
+ def start_time(self):
+ return self.to_timestamp(how='start')
+
+ @property
+ def end_time(self):
+ return self.to_timestamp(how='end')
+
def asfreq(self, freq=None, how='E'):
"""
Convert the Period Array/Index to the specified frequency `freq`.
@@ -266,6 +279,48 @@ def asfreq(self, freq=None, how='E'):
return self._shallow_copy(new_data, freq=freq)
+ def to_timestamp(self, freq=None, how='start'):
+ """
+ Cast to DatetimeArray/Index
+
+ Parameters
+ ----------
+ freq : string or DateOffset, optional
+ Target frequency. The default is 'D' for week or longer,
+ 'S' otherwise
+ how : {'s', 'e', 'start', 'end'}
+
+ Returns
+ -------
+ DatetimeArray/Index
+ """
+ from pandas.core.arrays.datetimes import DatetimeArrayMixin
+
+ how = libperiod._validate_end_alias(how)
+
+ end = how == 'E'
+ if end:
+ if freq == 'B':
+ # roll forward to ensure we land on B date
+ adjust = Timedelta(1, 'D') - Timedelta(1, 'ns')
+ return self.to_timestamp(how='start') + adjust
+ else:
+ adjust = Timedelta(1, 'ns')
+ return (self + 1).to_timestamp(how='start') - adjust
+
+ if freq is None:
+ base, mult = frequencies.get_freq_code(self.freq)
+ freq = frequencies.get_to_timestamp_base(base)
+ else:
+ freq = Period._maybe_convert_freq(freq)
+
+ base, mult = frequencies.get_freq_code(freq)
+ new_data = self.asfreq(freq, how=how)
+
+ new_data = libperiod.periodarr_to_dt64arr(new_data._ndarray_values,
+ base)
+ return DatetimeArrayMixin(new_data, freq='infer')
+
# ------------------------------------------------------------------
# Arithmetic Methods
@@ -392,6 +447,15 @@ def _maybe_convert_timedelta(self, other):
# -------------------------------------------------------------------
# Constructor Helpers
+def dt64arr_to_periodarr(data, freq, tz=None):
+ if data.dtype != np.dtype('M8[ns]'):
+ raise ValueError('Wrong dtype: %s' % data.dtype)
+
+ freq = Period._maybe_convert_freq(freq)
+ base, mult = frequencies.get_freq_code(freq)
+ return libperiod.dt64arr_to_periodarr(data.view('i8'), base, tz)
+
+
def _get_ordinal_range(start, end, periods, freq, mult=1):
if com.count_not_none(start, end, periods) != 2:
raise ValueError('Of the three parameters: start, end, and periods, '
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index a6cdaa0c2163a..e40ceadc1a083 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -36,7 +36,7 @@
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.indexes.numeric import Int64Index, Float64Index
import pandas.compat as compat
-from pandas.tseries.frequencies import to_offset, get_period_alias, Resolution
+from pandas.tseries.frequencies import to_offset, Resolution
from pandas.core.indexes.datetimelike import (
DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin)
from pandas.tseries.offsets import (
@@ -302,7 +302,8 @@ def __new__(cls, data=None,
tz=tz, normalize=normalize,
closed=closed, ambiguous=ambiguous)
- if not isinstance(data, (np.ndarray, Index, ABCSeries)):
+ if not isinstance(data, (np.ndarray, Index, ABCSeries,
+ DatetimeArrayMixin)):
if is_scalar(data):
raise ValueError('DatetimeIndex() must be called with a '
'collection of some kind, %s was passed'
@@ -673,67 +674,12 @@ def to_series(self, keep_tz=False, index=None, name=None):
return Series(values, index=index, name=name)
+ @Appender(DatetimeArrayMixin.to_period.__doc__)
def to_period(self, freq=None):
- """
- Cast to PeriodIndex at a particular frequency.
-
- Converts DatetimeIndex to PeriodIndex.
-
- Parameters
- ----------
- freq : string or Offset, optional
- One of pandas' :ref:`offset strings <timeseries.offset_aliases>`
- or an Offset object. Will be inferred by default.
-
- Returns
- -------
- PeriodIndex
-
- Raises
- ------
- ValueError
- When converting a DatetimeIndex with non-regular values, so that a
- frequency cannot be inferred.
-
- Examples
- --------
- >>> df = pd.DataFrame({"y": [1,2,3]},
- ... index=pd.to_datetime(["2000-03-31 00:00:00",
- ... "2000-05-31 00:00:00",
- ... "2000-08-31 00:00:00"]))
- >>> df.index.to_period("M")
- PeriodIndex(['2000-03', '2000-05', '2000-08'],
- dtype='period[M]', freq='M')
-
- Infer the daily frequency
-
- >>> idx = pd.date_range("2017-01-01", periods=2)
- >>> idx.to_period()
- PeriodIndex(['2017-01-01', '2017-01-02'],
- dtype='period[D]', freq='D')
-
- See also
- --------
- pandas.PeriodIndex: Immutable ndarray holding ordinal values
- pandas.DatetimeIndex.to_pydatetime: Return DatetimeIndex as object
- """
from pandas.core.indexes.period import PeriodIndex
- if self.tz is not None:
- warnings.warn("Converting to PeriodIndex representation will "
- "drop timezone information.", UserWarning)
-
- if freq is None:
- freq = self.freqstr or self.inferred_freq
-
- if freq is None:
- msg = ("You must pass a freq argument as "
- "current index has none.")
- raise ValueError(msg)
-
- freq = get_period_alias(freq)
-
- return PeriodIndex(self.values, name=self.name, freq=freq)
+ result = DatetimeArrayMixin.to_period(self, freq=freq)
+ return PeriodIndex(result, name=self.name)
def snap(self, freq='S'):
"""
@@ -758,6 +704,7 @@ def snap(self, freq='S'):
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
+ # TODO: what about self.name? if so, use shallow_copy?
def unique(self, level=None):
# Override here since IndexOpsMixin.unique uses self._values.unique
@@ -769,8 +716,7 @@ def unique(self, level=None):
else:
naive = self
result = super(DatetimeIndex, naive).unique(level=level)
- return self._simple_new(result.values, name=self.name, tz=self.tz,
- freq=self.freq)
+ return self._shallow_copy(result.values)
def union(self, other):
"""
@@ -1421,8 +1367,7 @@ def insert(self, loc, item):
try:
new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
self[loc:].asi8))
- return DatetimeIndex(new_dates, name=self.name, freq=freq,
- tz=self.tz)
+ return self._shallow_copy(new_dates, freq=freq)
except (AttributeError, TypeError):
# fall back to object index
@@ -1458,7 +1403,7 @@ def delete(self, loc):
if (loc.start in (0, None) or loc.stop in (len(self), None)):
freq = self.freq
- return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz)
+ return self._shallow_copy(new_dates, freq=freq)
def indexer_at_time(self, time, asof=False):
"""
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index cc008694a8b84..7833dd851db34 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -17,7 +17,6 @@
pandas_dtype,
ensure_object)
-import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.core.indexes.datetimes import DatetimeIndex, Int64Index, Index
@@ -25,13 +24,13 @@
from pandas.core.tools.datetimes import parse_time_string
from pandas._libs.lib import infer_dtype
-from pandas._libs import tslib, index as libindex, Timedelta
+from pandas._libs import tslib, index as libindex
from pandas._libs.tslibs.period import (Period, IncompatibleFrequency,
- DIFFERENT_FREQ_INDEX,
- _validate_end_alias)
+ DIFFERENT_FREQ_INDEX)
from pandas._libs.tslibs import resolution, period
-from pandas.core.arrays.period import PeriodArrayMixin
+from pandas.core.arrays import datetimelike as dtl
+from pandas.core.arrays.period import PeriodArrayMixin, dt64arr_to_periodarr
from pandas.core.base import _shared_docs
from pandas.core.indexes.base import _index_shared_docs, ensure_index
@@ -56,14 +55,6 @@ def f(self):
return property(f)
-def dt64arr_to_periodarr(data, freq, tz):
- if data.dtype != np.dtype('M8[ns]'):
- raise ValueError('Wrong dtype: %s' % data.dtype)
-
- freq = Period._maybe_convert_freq(freq)
- base, mult = _gfc(freq)
- return period.dt64arr_to_periodarr(data.view('i8'), base, tz)
-
# --- Period index sketch
@@ -185,12 +176,7 @@ def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
raise TypeError('__new__() got an unexpected keyword argument {}'.
format(list(set(fields) - valid_field_set)[0]))
- if periods is not None:
- if is_float(periods):
- periods = int(periods)
- elif not is_integer(periods):
- msg = 'periods must be a number, got {periods}'
- raise TypeError(msg.format(periods=periods))
+ periods = dtl.validate_periods(periods)
if name is None and hasattr(data, 'name'):
name = data.name
@@ -461,55 +447,23 @@ def is_full(self):
daysinmonth = days_in_month
@property
+ @Appender(PeriodArrayMixin.start_time.__doc__)
def start_time(self):
- return self.to_timestamp(how='start')
+ return PeriodArrayMixin.start_time.fget(self)
@property
+ @Appender(PeriodArrayMixin.end_time.__doc__)
def end_time(self):
- return self.to_timestamp(how='end')
+ return PeriodArrayMixin.end_time.fget(self)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.astype(object).values
+ @Appender(PeriodArrayMixin.to_timestamp.__doc__)
def to_timestamp(self, freq=None, how='start'):
- """
- Cast to DatetimeIndex
-
- Parameters
- ----------
- freq : string or DateOffset, optional
- Target frequency. The default is 'D' for week or longer,
- 'S' otherwise
- how : {'s', 'e', 'start', 'end'}
-
- Returns
- -------
- DatetimeIndex
- """
- how = _validate_end_alias(how)
-
- end = how == 'E'
- if end:
- if freq == 'B':
- # roll forward to ensure we land on B date
- adjust = Timedelta(1, 'D') - Timedelta(1, 'ns')
- return self.to_timestamp(how='start') + adjust
- else:
- adjust = Timedelta(1, 'ns')
- return (self + 1).to_timestamp(how='start') - adjust
-
- if freq is None:
- base, mult = _gfc(self.freq)
- freq = frequencies.get_to_timestamp_base(base)
- else:
- freq = Period._maybe_convert_freq(freq)
-
- base, mult = _gfc(freq)
- new_data = self.asfreq(freq, how)
-
- new_data = period.periodarr_to_dt64arr(new_data._ndarray_values, base)
- return DatetimeIndex(new_data, freq='infer', name=self.name)
+ result = PeriodArrayMixin.to_timestamp(self, freq=freq, how=how)
+ return DatetimeIndex(result, name=self.name)
@property
def inferred_type(self):
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 933bc6233dca9..ee604f44b98e0 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -668,7 +668,7 @@ def insert(self, loc, item):
try:
new_tds = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
self[loc:].asi8))
- return TimedeltaIndex(new_tds, name=self.name, freq=freq)
+ return self._shallow_copy(new_tds, freq=freq)
except (AttributeError, TypeError):
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index 24f34884dc077..6bb4241451b3f 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -1,13 +1,50 @@
# -*- coding: utf-8 -*-
import numpy as np
+import pytest
import pandas as pd
+import pandas.util.testing as tm
from pandas.core.arrays.datetimes import DatetimeArrayMixin
from pandas.core.arrays.timedeltas import TimedeltaArrayMixin
from pandas.core.arrays.period import PeriodArrayMixin
+# TODO: more freq variants
+@pytest.fixture(params=['D', 'B', 'W', 'M', 'Q', 'Y'])
+def period_index(request):
+ """
+ A fixture to provide PeriodIndex objects with different frequencies.
+
+ Most PeriodArray behavior is already tested in PeriodIndex tests,
+ so here we just test that the PeriodArray behavior matches
+ the PeriodIndex behavior.
+ """
+ freqstr = request.param
+ # TODO: non-monotone indexes; NaTs, different start dates
+ pi = pd.period_range(start=pd.Timestamp('2000-01-01'),
+ periods=100,
+ freq=freqstr)
+ return pi
+
+
+@pytest.fixture(params=['D', 'B', 'W', 'M', 'Q', 'Y'])
+def datetime_index(request):
+ """
+ A fixture to provide DatetimeIndex objects with different frequencies.
+
+ Most DatetimeArray behavior is already tested in DatetimeIndex tests,
+ so here we just test that the DatetimeIndex behavior matches
+ the DatetimeIndex behavior.
+ """
+ freqstr = request.param
+ # TODO: non-monotone indexes; NaTs, different start dates, timezones
+ pi = pd.date_range(start=pd.Timestamp('2000-01-01'),
+ periods=100,
+ freq=freqstr)
+ return pi
+
+
class TestDatetimeArray(object):
def test_from_dti(self, tz_naive_fixture):
@@ -30,6 +67,41 @@ def test_astype_object(self, tz_naive_fixture):
assert asobj.dtype == 'O'
assert list(asobj) == list(dti)
+ @pytest.mark.parametrize('freqstr', ['D', 'B', 'W', 'M', 'Q', 'Y'])
+ def test_to_period(self, datetime_index, freqstr):
+ dti = datetime_index
+ arr = DatetimeArrayMixin(dti)
+
+ expected = dti.to_period(freq=freqstr)
+ result = arr.to_period(freq=freqstr)
+ assert isinstance(result, PeriodArrayMixin)
+
+ # placeholder until these become actual EA subclasses and we can use
+ # an EA-specific tm.assert_ function
+ tm.assert_index_equal(pd.Index(result), pd.Index(expected))
+
+ @pytest.mark.parametrize('propname', pd.DatetimeIndex._bool_ops)
+ def test_bool_properties(self, datetime_index, propname):
+ # in this case _bool_ops is just `is_leap_year`
+ dti = datetime_index
+ arr = DatetimeArrayMixin(dti)
+ assert dti.freq == arr.freq
+
+ result = getattr(arr, propname)
+ expected = np.array(getattr(dti, propname), dtype=result.dtype)
+
+ tm.assert_numpy_array_equal(result, expected)
+
+ @pytest.mark.parametrize('propname', pd.DatetimeIndex._field_ops)
+ def test_int_properties(self, datetime_index, propname):
+ dti = datetime_index
+ arr = DatetimeArrayMixin(dti)
+
+ result = getattr(arr, propname)
+ expected = np.array(getattr(dti, propname), dtype=result.dtype)
+
+ tm.assert_numpy_array_equal(result, expected)
+
class TestTimedeltaArray(object):
def test_from_tdi(self):
@@ -53,20 +125,54 @@ def test_astype_object(self):
class TestPeriodArray(object):
- def test_from_pi(self):
- pi = pd.period_range('2016', freq='Q', periods=3)
+ def test_from_pi(self, period_index):
+ pi = period_index
arr = PeriodArrayMixin(pi)
assert list(arr) == list(pi)
- # Check that Index.__new__ knows what to do with TimedeltaArray
+ # Check that Index.__new__ knows what to do with PeriodArray
pi2 = pd.Index(arr)
assert isinstance(pi2, pd.PeriodIndex)
assert list(pi2) == list(arr)
- def test_astype_object(self):
- pi = pd.period_range('2016', freq='Q', periods=3)
+ def test_astype_object(self, period_index):
+ pi = period_index
arr = PeriodArrayMixin(pi)
asobj = arr.astype('O')
assert isinstance(asobj, np.ndarray)
assert asobj.dtype == 'O'
assert list(asobj) == list(pi)
+
+ @pytest.mark.parametrize('how', ['S', 'E'])
+ def test_to_timestamp(self, how, period_index):
+ pi = period_index
+ arr = PeriodArrayMixin(pi)
+
+ expected = DatetimeArrayMixin(pi.to_timestamp(how=how))
+ result = arr.to_timestamp(how=how)
+ assert isinstance(result, DatetimeArrayMixin)
+
+ # placeholder until these become actual EA subclasses and we can use
+ # an EA-specific tm.assert_ function
+ tm.assert_index_equal(pd.Index(result), pd.Index(expected))
+
+ @pytest.mark.parametrize('propname', pd.PeriodIndex._bool_ops)
+ def test_bool_properties(self, period_index, propname):
+ # in this case _bool_ops is just `is_leap_year`
+ pi = period_index
+ arr = PeriodArrayMixin(pi)
+
+ result = getattr(arr, propname)
+ expected = np.array(getattr(pi, propname))
+
+ tm.assert_numpy_array_equal(result, expected)
+
+ @pytest.mark.parametrize('propname', pd.PeriodIndex._field_ops)
+ def test_int_properties(self, period_index, propname):
+ pi = period_index
+ arr = PeriodArrayMixin(pi)
+
+ result = getattr(arr, propname)
+ expected = np.array(getattr(pi, propname))
+
+ tm.assert_numpy_array_equal(result, expected)
| If/when this goes through the basic template it creates for tests will be pretty straightforward to extend to the other relevant methods/properties.
@TomAugspurger this definitely overlaps with #22862, particularly the pieces touching constructors. LMK if this causes problems and I can try to stay in my lane. | https://api.github.com/repos/pandas-dev/pandas/pulls/22961 | 2018-10-03T02:58:05Z | 2018-10-08T20:07:30Z | 2018-10-08T20:07:30Z | 2020-04-05T17:38:58Z |
CI: pin moto to 1.3.4 | diff --git a/ci/travis-27.yaml b/ci/travis-27.yaml
index a921bcb46dba4..6955db363ca1f 100644
--- a/ci/travis-27.yaml
+++ b/ci/travis-27.yaml
@@ -44,7 +44,7 @@ dependencies:
# universal
- pytest
- pytest-xdist
- - moto
+ - moto==1.3.4
- hypothesis>=3.58.0
- pip:
- backports.lzma
| xref https://github.com/pandas-dev/pandas/issues/22934
Looking back at the logs, moto 1.3.4 didn't seem to have an issues. Going to run this on the CI a few times to see if we get any failures. May have to make some additional pins of boto if this doesn't work. | https://api.github.com/repos/pandas-dev/pandas/pulls/22959 | 2018-10-03T01:36:07Z | 2018-10-03T13:49:44Z | 2018-10-03T13:49:44Z | 2018-10-03T13:49:45Z |
CLN: Use is_period_dtype instead of ABCPeriodIndex checks | diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 481d5313f0e25..41b4c5c669efc 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -264,7 +264,7 @@ def asfreq(self, freq=None, how='E'):
if self.hasnans:
new_data[self._isnan] = iNaT
- return self._simple_new(new_data, self.name, freq=freq)
+ return self._shallow_copy(new_data, freq=freq)
# ------------------------------------------------------------------
# Arithmetic Methods
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 37a12a588db03..1ec30ecbb3a3b 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -21,6 +21,7 @@
is_list_like,
is_scalar,
is_bool_dtype,
+ is_period_dtype,
is_categorical_dtype,
is_datetime_or_timedelta_dtype,
is_float_dtype,
@@ -28,7 +29,7 @@
is_object_dtype,
is_string_dtype)
from pandas.core.dtypes.generic import (
- ABCIndex, ABCSeries, ABCPeriodIndex, ABCIndexClass)
+ ABCIndex, ABCSeries, ABCIndexClass)
from pandas.core.dtypes.missing import isna
from pandas.core import common as com, algorithms, ops
@@ -239,9 +240,8 @@ def equals(self, other):
# have different timezone
return False
- # ToDo: Remove this when PeriodDtype is added
- elif isinstance(self, ABCPeriodIndex):
- if not isinstance(other, ABCPeriodIndex):
+ elif is_period_dtype(self):
+ if not is_period_dtype(other):
return False
if self.freq != other.freq:
return False
@@ -359,7 +359,7 @@ def sort_values(self, return_indexer=False, ascending=True):
attribs = self._get_attributes_dict()
freq = attribs['freq']
- if freq is not None and not isinstance(self, ABCPeriodIndex):
+ if freq is not None and not is_period_dtype(self):
if freq.n > 0 and not ascending:
freq = freq * -1
elif freq.n < 0 and ascending:
@@ -386,8 +386,8 @@ def take(self, indices, axis=0, allow_fill=True,
fill_value=fill_value,
na_value=iNaT)
- # keep freq in PeriodIndex, reset otherwise
- freq = self.freq if isinstance(self, ABCPeriodIndex) else None
+ # keep freq in PeriodArray/Index, reset otherwise
+ freq = self.freq if is_period_dtype(self) else None
return self._shallow_copy(taken, freq=freq)
_can_hold_na = True
@@ -618,7 +618,7 @@ def repeat(self, repeats, *args, **kwargs):
Analogous to ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
- if isinstance(self, ABCPeriodIndex):
+ if is_period_dtype(self):
freq = self.freq
else:
freq = None
@@ -673,7 +673,7 @@ def _concat_same_dtype(self, to_concat, name):
attribs = self._get_attributes_dict()
attribs['name'] = name
- if not isinstance(self, ABCPeriodIndex):
+ if not is_period_dtype(self):
# reset freq
attribs['freq'] = None
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 119a607fc0e68..6091df776a01b 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1001,7 +1001,7 @@ def _try_mi(k):
(compat.PY3 and isinstance(key, compat.string_types))):
try:
return _try_mi(key)
- except (KeyError):
+ except KeyError:
raise
except (IndexError, ValueError, TypeError):
pass
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 981bfddeadac1..fd8e17c369f5a 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -512,33 +512,33 @@ def __getitem__(self, key):
# This is basically PySlice_GetIndicesEx, but delegation to our
# super routines if we don't have integers
- l = len(self)
+ length = len(self)
# complete missing slice information
step = 1 if key.step is None else key.step
if key.start is None:
- start = l - 1 if step < 0 else 0
+ start = length - 1 if step < 0 else 0
else:
start = key.start
if start < 0:
- start += l
+ start += length
if start < 0:
start = -1 if step < 0 else 0
- if start >= l:
- start = l - 1 if step < 0 else l
+ if start >= length:
+ start = length - 1 if step < 0 else length
if key.stop is None:
- stop = -1 if step < 0 else l
+ stop = -1 if step < 0 else length
else:
stop = key.stop
if stop < 0:
- stop += l
+ stop += length
if stop < 0:
stop = -1
- if stop > l:
- stop = l
+ if stop > length:
+ stop = length
# delegate non-integer slices
if (start != int(start) or
| Will be more generally correct for catching both PeriodIndex and PeriodArray.
Unrelated change of `l` -> `length` | https://api.github.com/repos/pandas-dev/pandas/pulls/22958 | 2018-10-03T01:28:04Z | 2018-10-03T09:19:28Z | 2018-10-03T09:19:28Z | 2018-10-03T12:17:32Z |
CLN: small clean-up of IntervalIndex | diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 90df596b98296..134999f05364f 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -108,12 +108,7 @@ class IntervalArray(IntervalMixin, ExtensionArray):
_na_value = _fill_value = np.nan
def __new__(cls, data, closed=None, dtype=None, copy=False,
- fastpath=False, verify_integrity=True):
-
- if fastpath:
- return cls._simple_new(data.left, data.right, closed,
- copy=copy, dtype=dtype,
- verify_integrity=False)
+ verify_integrity=True):
if isinstance(data, ABCSeries) and is_interval_dtype(data):
data = data.values
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 4b125580bd7e0..f72f87aeb2af6 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -146,17 +146,13 @@ class IntervalIndex(IntervalMixin, Index):
_mask = None
def __new__(cls, data, closed=None, dtype=None, copy=False,
- name=None, fastpath=False, verify_integrity=True):
-
- if fastpath:
- return cls._simple_new(data, name)
+ name=None, verify_integrity=True):
if name is None and hasattr(data, 'name'):
name = data.name
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype,
- fastpath=fastpath,
verify_integrity=verify_integrity)
return cls._simple_new(array, name)
@@ -187,14 +183,6 @@ def _shallow_copy(self, left=None, right=None, **kwargs):
attributes.update(kwargs)
return self._simple_new(result, **attributes)
- @cache_readonly
- def hasnans(self):
- """
- Return if the IntervalIndex has any nans; enables various performance
- speedups
- """
- return self._isnan.any()
-
@cache_readonly
def _isnan(self):
"""Return a mask indicating if each value is NA"""
@@ -206,10 +194,6 @@ def _isnan(self):
def _engine(self):
return IntervalTree(self.left, self.right, closed=self.closed)
- @property
- def _constructor(self):
- return type(self)
-
def __contains__(self, key):
"""
return a boolean if this key is IN the index
@@ -394,18 +378,7 @@ def _values(self):
@cache_readonly
def _ndarray_values(self):
- left = self.left
- right = self.right
- mask = self._isnan
- closed = self.closed
-
- result = np.empty(len(left), dtype=object)
- for i in range(len(left)):
- if mask[i]:
- result[i] = np.nan
- else:
- result[i] = Interval(left[i], right[i], closed)
- return result
+ return np.array(self._data)
def __array__(self, result=None):
""" the array interface, return my values """
@@ -892,18 +865,12 @@ def take(self, indices, axis=0, allow_fill=True,
return self._simple_new(result, **attributes)
def __getitem__(self, value):
- mask = self._isnan[value]
- if is_scalar(mask) and mask:
- return self._na_value
-
- left = self.left[value]
- right = self.right[value]
-
- # scalar
- if not isinstance(left, Index):
- return Interval(left, right, self.closed)
-
- return self._shallow_copy(left, right)
+ result = self._data[value]
+ if isinstance(result, IntervalArray):
+ return self._shallow_copy(result)
+ else:
+ # scalar
+ return result
# __repr__ associated methods are based on MultiIndex
| Removed some parts of IntervalIndex that are not needed (because they are inherited from Index or can be dispatched to the array)
\+ removed `fastpath` (for IntervalArray this is no problem (never used), for IntervalIndex I am not fully sure, since this signature might need to match the other index classes)
cc @jschendel @TomAugspurger | https://api.github.com/repos/pandas-dev/pandas/pulls/22956 | 2018-10-02T21:20:04Z | 2018-10-03T07:24:37Z | 2018-10-03T07:24:37Z | 2018-10-03T07:25:07Z |
Support for PEP 3141 numbers | diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index f94aa3d320b75..22f2add40c451 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -465,6 +465,7 @@ For situations where you need an ``ndarray`` of ``Interval`` objects, use
np.asarray(idx)
idx.values.astype(object)
+
.. _whatsnew_0240.api.timezone_offset_parsing:
Parsing Datetime Strings with Timezone Offsets
@@ -1471,6 +1472,7 @@ Other
- :meth:`DataFrame.nlargest` and :meth:`DataFrame.nsmallest` now returns the correct n values when keep != 'all' also when tied on the first columns (:issue:`22752`)
- :meth:`~pandas.io.formats.style.Styler.bar` now also supports tablewise application (in addition to rowwise and columnwise) with ``axis=None`` and setting clipping range with ``vmin`` and ``vmax`` (:issue:`21548` and :issue:`21526`). ``NaN`` values are also handled properly.
- Logical operations ``&, |, ^`` between :class:`Series` and :class:`Index` will no longer raise ``ValueError`` (:issue:`22092`)
+- Checking PEP 3141 numbers in :func:`~pandas.api.types.is_scalar` function returns ``True`` (:issue:`22903`)
- Bug in :meth:`DataFrame.combine_first` in which column types were unexpectedly converted to float (:issue:`20699`)
.. _whatsnew_0.24.0.contributors:
@@ -1479,3 +1481,4 @@ Contributors
~~~~~~~~~~~~
.. contributors:: v0.23.4..HEAD
+
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index e89c8fa579687..ad538ff103c2f 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -1,5 +1,8 @@
# -*- coding: utf-8 -*-
from decimal import Decimal
+from fractions import Fraction
+from numbers import Number
+
import sys
import cython
@@ -15,7 +18,6 @@ from cpython.datetime cimport (PyDateTime_Check, PyDate_Check,
PyDateTime_IMPORT)
PyDateTime_IMPORT
-
import numpy as np
cimport numpy as cnp
from numpy cimport (ndarray, PyArray_GETITEM,
@@ -105,23 +107,54 @@ def is_scalar(val: object) -> bool:
"""
Return True if given value is scalar.
- This includes:
- - numpy array scalar (e.g. np.int64)
- - Python builtin numerics
- - Python builtin byte arrays and strings
- - None
- - instances of datetime.datetime
- - instances of datetime.timedelta
- - Period
- - instances of decimal.Decimal
- - Interval
- - DateOffset
+ Parameters
+ ----------
+ val : object
+ This includes:
+
+ - numpy array scalar (e.g. np.int64)
+ - Python builtin numerics
+ - Python builtin byte arrays and strings
+ - None
+ - datetime.datetime
+ - datetime.timedelta
+ - Period
+ - decimal.Decimal
+ - Interval
+ - DateOffset
+ - Fraction
+ - Number
+
+ Returns
+ -------
+ bool
+ Return True if given object is scalar, False otherwise
+
+ Examples
+ --------
+ >>> dt = pd.datetime.datetime(2018, 10, 3)
+ >>> pd.is_scalar(dt)
+ True
+
+ >>> pd.api.types.is_scalar([2, 3])
+ False
+
+ >>> pd.api.types.is_scalar({0: 1, 2: 3})
+ False
+
+ >>> pd.api.types.is_scalar((0, 2))
+ False
+
+ pandas supports PEP 3141 numbers:
+ >>> from fractions import Fraction
+ >>> pd.api.types.is_scalar(Fraction(3, 5))
+ True
"""
return (cnp.PyArray_IsAnyScalar(val)
# As of numpy-1.9, PyArray_IsAnyScalar misses bytearrays on Py3.
- or isinstance(val, bytes)
+ or isinstance(val, (bytes, Fraction, Number))
# We differ from numpy (as of 1.10), which claims that None is
# not scalar in np.isscalar().
or val is None
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index f2552cffc6651..fd3222cd1119b 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -10,10 +10,11 @@
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
+from numbers import Number
+from fractions import Fraction
import numpy as np
import pytz
import pytest
-
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
@@ -1183,6 +1184,8 @@ def test_is_scalar_builtin_scalars(self):
assert is_scalar(None)
assert is_scalar(True)
assert is_scalar(False)
+ assert is_scalar(Number())
+ assert is_scalar(Fraction())
assert is_scalar(0.)
assert is_scalar(np.nan)
assert is_scalar('foobar')
| - [x] closes #22903
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22952 | 2018-10-02T19:57:09Z | 2018-11-20T10:41:34Z | 2018-11-20T10:41:33Z | 2018-11-20T10:41:57Z |
CI: deduplicate skip printing | diff --git a/ci/script_multi.sh b/ci/script_multi.sh
index 2b2d4d5488b91..dcc5a14d7b3b4 100755
--- a/ci/script_multi.sh
+++ b/ci/script_multi.sh
@@ -28,16 +28,16 @@ if [ "$DOC" ]; then
elif [ "$COVERAGE" ]; then
echo pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
- pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
+ pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
elif [ "$SLOW" ]; then
TEST_ARGS="--only-slow --skip-network"
- echo pytest -r xX -m "not single and slow" -v --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
- pytest -r xX -m "not single and slow" -v --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
+ echo pytest -m "not single and slow" -v --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
+ pytest -m "not single and slow" -v --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
else
- echo pytest -n 2 -r xX -m "not single" --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
- pytest -n 2 -r xX -m "not single" --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas # TODO: doctest
+ echo pytest -n 2 -m "not single" --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
+ pytest -n 2 -m "not single" --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas # TODO: doctest
fi
diff --git a/ci/script_single.sh b/ci/script_single.sh
index ed12ee35b9151..09e7446a2d876 100755
--- a/ci/script_single.sh
+++ b/ci/script_single.sh
@@ -25,14 +25,14 @@ if [ "$DOC" ]; then
echo "We are not running pytest as this is a doc-build"
elif [ "$COVERAGE" ]; then
- echo pytest -s -m "single" -r xXs --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
- pytest -s -m "single" -r xXs --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
+ echo pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
+ pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
- echo pytest -s -r xXs --strict scripts
- pytest -s -r xXs --strict scripts
+ echo pytest -s --strict scripts
+ pytest -s --strict scripts
else
- echo pytest -m "single" -r xXs --junitxml=/tmp/single.xml --strict $TEST_ARGS pandas
- pytest -m "single" -r xXs --junitxml=/tmp/single.xml --strict $TEST_ARGS pandas # TODO: doctest
+ echo pytest -m "single" --junitxml=/tmp/single.xml --strict $TEST_ARGS pandas
+ pytest -m "single" --junitxml=/tmp/single.xml --strict $TEST_ARGS pandas # TODO: doctest
fi
| Trying to cut down on the noise in the travis log. Right now, we print out skipped / xfailed tests twice. This removes the `-r` flag passed to `pytest`. The skipped tests output will still be available under the output of `ci/print_skipped.py`. I believe that azure automatically parses the XML output anyway.
cc @datapythonista | https://api.github.com/repos/pandas-dev/pandas/pulls/22950 | 2018-10-02T18:38:06Z | 2018-10-08T13:42:22Z | 2018-10-08T13:42:21Z | 2018-10-08T13:42:25Z |
Use ._tshift internally for datetimelike ops | diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 91c119808db52..1ce60510c6a69 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -455,7 +455,7 @@ def _sub_period_array(self, other):
def _addsub_int_array(self, other, op):
"""
Add or subtract array-like of integers equivalent to applying
- `shift` pointwise.
+ `_time_shift` pointwise.
Parameters
----------
@@ -553,6 +553,23 @@ def shift(self, periods, freq=None):
--------
Index.shift : Shift values of Index.
"""
+ return self._time_shift(periods=periods, freq=freq)
+
+ def _time_shift(self, periods, freq=None):
+ """
+ Shift each value by `periods`.
+
+ Note this is different from ExtensionArray.shift, which
+ shifts the *position* of each element, padding the end with
+ missing values.
+
+ Parameters
+ ----------
+ periods : int
+ Number of periods to shift by.
+ freq : pandas.DateOffset, pandas.Timedelta, or string
+ Frequency increment to shift by.
+ """
if freq is not None and freq != self.freq:
if isinstance(freq, compat.string_types):
freq = frequencies.to_offset(freq)
@@ -600,7 +617,7 @@ def __add__(self, other):
elif lib.is_integer(other):
# This check must come after the check for np.timedelta64
# as is_integer returns True for these
- result = self.shift(other)
+ result = self._time_shift(other)
# array-like others
elif is_timedelta64_dtype(other):
@@ -652,7 +669,7 @@ def __sub__(self, other):
elif lib.is_integer(other):
# This check must come after the check for np.timedelta64
# as is_integer returns True for these
- result = self.shift(-other)
+ result = self._time_shift(-other)
elif isinstance(other, Period):
result = self._sub_period(other)
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 41b4c5c669efc..92803ab5f52e0 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -297,7 +297,7 @@ def _add_offset(self, other):
if base != self.freq.rule_code:
msg = DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
- return self.shift(other.n)
+ return self._time_shift(other.n)
def _add_delta_td(self, other):
assert isinstance(other, (timedelta, np.timedelta64, Tick))
@@ -307,7 +307,7 @@ def _add_delta_td(self, other):
if isinstance(own_offset, Tick):
offset_nanos = delta_to_nanoseconds(own_offset)
if np.all(nanos % offset_nanos == 0):
- return self.shift(nanos // offset_nanos)
+ return self._time_shift(nanos // offset_nanos)
# raise when input doesn't have freq
raise IncompatibleFrequency("Input has different freq from "
@@ -317,7 +317,7 @@ def _add_delta_td(self, other):
def _add_delta(self, other):
ordinal_delta = self._maybe_convert_timedelta(other)
- return self.shift(ordinal_delta)
+ return self._time_shift(ordinal_delta)
def shift(self, n):
"""
@@ -332,6 +332,9 @@ def shift(self, n):
-------
shifted : Period Array/Index
"""
+ return self._time_shift(n)
+
+ def _time_shift(self, n):
values = self._ndarray_values + n * self.freq.n
if self.hasnans:
values[self._isnan] = iNaT
| In preperation for PeriodArray / DatetimeArray / TimedeltaArray.
Index.shift has a different meaning from ExtensionArray.shift.
- Index.shift pointwise shifts each element by some amount
- ExtensionArray.shift shits the *position* of each value in the array
padding the end with NA
This is going to get confusing. This PR tries to avoid some of that by
internally using a new `_tshift` method (time-shift) when we want to do pointwise
shifting of each value. Places that know they want that behavior (like in the
datetimelike ops) should use that. | https://api.github.com/repos/pandas-dev/pandas/pulls/22949 | 2018-10-02T18:26:44Z | 2018-10-05T12:00:39Z | 2018-10-05T12:00:39Z | 2018-10-05T12:00:43Z |
change windows vm image | diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index c82dafa224961..5d473bfc5a38c 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -18,8 +18,8 @@ jobs:
- template: ci/azure/windows.yml
parameters:
name: Windows
- vmImage: vs2017-win2017
+ vmImage: vs2017-win2016
- template: ci/azure/windows-py27.yml
parameters:
name: WindowsPy27
- vmImage: vs2017-win2017
+ vmImage: vs2017-win2016
| Closes #22946
copying https://github.com/numba/numba/pull/3371/files | https://api.github.com/repos/pandas-dev/pandas/pulls/22948 | 2018-10-02T17:55:01Z | 2018-10-02T18:25:23Z | 2018-10-02T18:25:22Z | 2018-10-02T21:37:06Z |
STYLE: #22885, Moved all setup imports to bottom of benchmark files and added noqa: F401 to each | diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py
index fc34440ece2ed..baac179355022 100644
--- a/asv_bench/benchmarks/algorithms.py
+++ b/asv_bench/benchmarks/algorithms.py
@@ -12,8 +12,6 @@
except (ImportError, TypeError, ValueError):
pass
-from .pandas_vb_common import setup # noqa
-
class Factorize(object):
@@ -126,3 +124,6 @@ def time_series_timedeltas(self, df):
def time_series_dates(self, df):
hashing.hash_pandas_object(df['dates'])
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/attrs_caching.py b/asv_bench/benchmarks/attrs_caching.py
index 48f0b7d71144c..7fb9fd26ad8ba 100644
--- a/asv_bench/benchmarks/attrs_caching.py
+++ b/asv_bench/benchmarks/attrs_caching.py
@@ -5,8 +5,6 @@
except ImportError:
from pandas.util.decorators import cache_readonly
-from .pandas_vb_common import setup # noqa
-
class DataFrameAttributes(object):
@@ -38,3 +36,6 @@ def prop(self):
def time_cache_readonly(self):
self.obj.prop
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/binary_ops.py b/asv_bench/benchmarks/binary_ops.py
index cc8766e1fa39c..35787920dc789 100644
--- a/asv_bench/benchmarks/binary_ops.py
+++ b/asv_bench/benchmarks/binary_ops.py
@@ -6,8 +6,6 @@
except ImportError:
import pandas.computation.expressions as expr
-from .pandas_vb_common import setup # noqa
-
class Ops(object):
@@ -149,3 +147,6 @@ def time_add_overflow_b_mask_nan(self):
def time_add_overflow_both_arg_nan(self):
checked_add_with_arr(self.arr, self.arr_mixed, arr_mask=self.arr_nan_1,
b_mask=self.arr_nan_2)
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py
index 2a7717378c280..418e60eb6d6d3 100644
--- a/asv_bench/benchmarks/categoricals.py
+++ b/asv_bench/benchmarks/categoricals.py
@@ -11,8 +11,6 @@
except ImportError:
pass
-from .pandas_vb_common import setup # noqa
-
class Concat(object):
@@ -245,3 +243,6 @@ def time_getitem_list(self, index):
def time_getitem_bool_array(self, index):
self.data[self.data == self.cat_scalar]
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py
index 3f9016787aab4..94dbd05917455 100644
--- a/asv_bench/benchmarks/ctors.py
+++ b/asv_bench/benchmarks/ctors.py
@@ -2,8 +2,6 @@
import pandas.util.testing as tm
from pandas import Series, Index, DatetimeIndex, Timestamp, MultiIndex
-from .pandas_vb_common import setup # noqa
-
class SeriesConstructors(object):
@@ -64,3 +62,6 @@ def setup(self):
def time_multiindex_from_iterables(self):
MultiIndex.from_product(self.iterables)
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/eval.py b/asv_bench/benchmarks/eval.py
index 8e581dcf22b4c..da2d7dc7c4492 100644
--- a/asv_bench/benchmarks/eval.py
+++ b/asv_bench/benchmarks/eval.py
@@ -5,8 +5,6 @@
except ImportError:
import pandas.computation.expressions as expr
-from .pandas_vb_common import setup # noqa
-
class Eval(object):
@@ -65,3 +63,6 @@ def time_query_datetime_column(self):
def time_query_with_boolean_selection(self):
self.df.query('(a >= @self.min_val) & (a <= @self.max_val)')
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py
index 9def910df0bab..d3f1a416a7cc1 100644
--- a/asv_bench/benchmarks/frame_ctor.py
+++ b/asv_bench/benchmarks/frame_ctor.py
@@ -7,8 +7,6 @@
# For compatibility with older versions
from pandas.core.datetools import * # noqa
-from .pandas_vb_common import setup # noqa
-
class FromDicts(object):
@@ -99,3 +97,6 @@ def setup(self):
def time_frame_from_ndarray(self):
self.df = DataFrame(self.data)
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index f911d506b1f4f..89fd879746f68 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -6,8 +6,6 @@
from pandas import (DataFrame, Series, MultiIndex, date_range, period_range,
isnull, NaT)
-from .pandas_vb_common import setup # noqa
-
class GetNumericData(object):
@@ -537,3 +535,6 @@ def time_series_describe(self):
def time_dataframe_describe(self):
self.df.describe()
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py
index 21c1ccf46e1c4..32cb60be3f485 100644
--- a/asv_bench/benchmarks/gil.py
+++ b/asv_bench/benchmarks/gil.py
@@ -23,7 +23,7 @@ def wrapper(fname):
return fname
return wrapper
-from .pandas_vb_common import BaseIO, setup # noqa
+from .pandas_vb_common import BaseIO
class ParallelGroupbyMethods(object):
@@ -273,3 +273,6 @@ def time_parallel(self, threads):
def time_loop(self, threads):
for i in range(threads):
self.loop()
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index b51b41614bc49..be09bba97bea3 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -8,8 +8,6 @@
TimeGrouper, Categorical, Timestamp)
import pandas.util.testing as tm
-from .pandas_vb_common import setup # noqa
-
method_blacklist = {
'object': {'median', 'prod', 'sem', 'cumsum', 'sum', 'cummin', 'mean',
@@ -579,3 +577,6 @@ def setup(self):
def time_first(self):
self.df_nans.groupby('key').transform('first')
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py
index f1703e163917a..c1bc53823a342 100644
--- a/asv_bench/benchmarks/index_object.py
+++ b/asv_bench/benchmarks/index_object.py
@@ -3,8 +3,6 @@
from pandas import (Series, date_range, DatetimeIndex, Index, RangeIndex,
Float64Index)
-from .pandas_vb_common import setup # noqa
-
class SetOperations(object):
@@ -192,3 +190,6 @@ def setup(self):
def time_get_loc(self):
self.ind.get_loc(0)
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py
index 2850fa249725c..e83efdd0fa2a0 100644
--- a/asv_bench/benchmarks/indexing.py
+++ b/asv_bench/benchmarks/indexing.py
@@ -2,10 +2,10 @@
import numpy as np
import pandas.util.testing as tm
-from pandas import (Series, DataFrame, MultiIndex, Panel,
- Int64Index, Float64Index, IntervalIndex,
- CategoricalIndex, IndexSlice, concat, date_range)
-from .pandas_vb_common import setup # noqa
+from pandas import (Series, DataFrame, MultiIndex, Int64Index, Float64Index,
+ IntervalIndex, CategoricalIndex,
+ IndexSlice, concat, date_range)
+from .pandas_vb_common import Panel
class NumericSeriesIndexing(object):
@@ -367,3 +367,6 @@ def time_assign_with_setitem(self):
np.random.seed(1234)
for i in range(100):
self.df[i] = np.random.randn(self.N)
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/inference.py b/asv_bench/benchmarks/inference.py
index 16d9e7cd73cbb..7fb57991c99bc 100644
--- a/asv_bench/benchmarks/inference.py
+++ b/asv_bench/benchmarks/inference.py
@@ -2,7 +2,7 @@
import pandas.util.testing as tm
from pandas import DataFrame, Series, to_numeric
-from .pandas_vb_common import numeric_dtypes, lib, setup # noqa
+from .pandas_vb_common import numeric_dtypes, lib
class NumericInferOps(object):
@@ -111,3 +111,6 @@ def setup_cache(self):
def time_convert(self, data):
lib.maybe_convert_numeric(data, set(), coerce_numeric=False)
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/io/csv.py b/asv_bench/benchmarks/io/csv.py
index 12cb893462b87..ac2370fe85e5a 100644
--- a/asv_bench/benchmarks/io/csv.py
+++ b/asv_bench/benchmarks/io/csv.py
@@ -6,7 +6,7 @@
from pandas import DataFrame, Categorical, date_range, read_csv
from pandas.compat import cStringIO as StringIO
-from ..pandas_vb_common import setup, BaseIO # noqa
+from ..pandas_vb_common import BaseIO
class ToCSV(BaseIO):
@@ -225,3 +225,6 @@ def time_baseline(self):
read_csv(self.data(self.StringIO_input), sep=',', header=None,
parse_dates=[1],
names=list(string.digits[:9]))
+
+
+from ..pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py
index 58ab6bb8046c5..b873dc1040a66 100644
--- a/asv_bench/benchmarks/io/excel.py
+++ b/asv_bench/benchmarks/io/excel.py
@@ -3,7 +3,7 @@
from pandas.compat import BytesIO
import pandas.util.testing as tm
-from ..pandas_vb_common import BaseIO, setup # noqa
+from ..pandas_vb_common import BaseIO
class Excel(object):
@@ -34,3 +34,6 @@ def time_write_excel(self, engine):
writer_write = ExcelWriter(bio_write, engine=engine)
self.df.to_excel(writer_write, sheet_name='Sheet1')
writer_write.save()
+
+
+from ..pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/io/hdf.py b/asv_bench/benchmarks/io/hdf.py
index 4b6e1d69af92d..c150d82450770 100644
--- a/asv_bench/benchmarks/io/hdf.py
+++ b/asv_bench/benchmarks/io/hdf.py
@@ -4,7 +4,7 @@
from pandas import DataFrame, Panel, date_range, HDFStore, read_hdf
import pandas.util.testing as tm
-from ..pandas_vb_common import BaseIO, setup # noqa
+from ..pandas_vb_common import BaseIO
class HDFStoreDataFrame(BaseIO):
@@ -149,3 +149,6 @@ def time_read_hdf(self, format):
def time_write_hdf(self, format):
self.df.to_hdf(self.fname, 'df', format=format)
+
+
+from ..pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/io/json.py b/asv_bench/benchmarks/io/json.py
index acfdd327c3b51..df5bf7341c303 100644
--- a/asv_bench/benchmarks/io/json.py
+++ b/asv_bench/benchmarks/io/json.py
@@ -2,7 +2,7 @@
import pandas.util.testing as tm
from pandas import DataFrame, date_range, timedelta_range, concat, read_json
-from ..pandas_vb_common import setup, BaseIO # noqa
+from ..pandas_vb_common import BaseIO
class ReadJSON(BaseIO):
@@ -125,3 +125,6 @@ def time_float_int_lines(self, orient):
def time_float_int_str_lines(self, orient):
self.df_int_float_str.to_json(self.fname, orient='records', lines=True)
+
+
+from ..pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/io/msgpack.py b/asv_bench/benchmarks/io/msgpack.py
index 8ccce01117ca4..7033aa9ce3c40 100644
--- a/asv_bench/benchmarks/io/msgpack.py
+++ b/asv_bench/benchmarks/io/msgpack.py
@@ -2,7 +2,7 @@
from pandas import DataFrame, date_range, read_msgpack
import pandas.util.testing as tm
-from ..pandas_vb_common import BaseIO, setup # noqa
+from ..pandas_vb_common import BaseIO
class MSGPack(BaseIO):
@@ -24,3 +24,6 @@ def time_read_msgpack(self):
def time_write_msgpack(self):
self.df.to_msgpack(self.fname)
+
+
+from ..pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/io/pickle.py b/asv_bench/benchmarks/io/pickle.py
index 2ad0fcca6eb26..0960d721281c7 100644
--- a/asv_bench/benchmarks/io/pickle.py
+++ b/asv_bench/benchmarks/io/pickle.py
@@ -2,7 +2,7 @@
from pandas import DataFrame, date_range, read_pickle
import pandas.util.testing as tm
-from ..pandas_vb_common import BaseIO, setup # noqa
+from ..pandas_vb_common import BaseIO
class Pickle(BaseIO):
@@ -24,3 +24,6 @@ def time_read_pickle(self):
def time_write_pickle(self):
self.df.to_pickle(self.fname)
+
+
+from ..pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/io/sql.py b/asv_bench/benchmarks/io/sql.py
index ef4e501e5f3b9..6a8529cad592b 100644
--- a/asv_bench/benchmarks/io/sql.py
+++ b/asv_bench/benchmarks/io/sql.py
@@ -5,8 +5,6 @@
from pandas import DataFrame, date_range, read_sql_query, read_sql_table
from sqlalchemy import create_engine
-from ..pandas_vb_common import setup # noqa
-
class SQL(object):
@@ -130,3 +128,6 @@ def setup(self, dtype):
def time_read_sql_table_column(self, dtype):
read_sql_table(self.table_name, self.con, columns=[dtype])
+
+
+from ..pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/io/stata.py b/asv_bench/benchmarks/io/stata.py
index e0f5752ca930f..d74a531877e18 100644
--- a/asv_bench/benchmarks/io/stata.py
+++ b/asv_bench/benchmarks/io/stata.py
@@ -2,7 +2,7 @@
from pandas import DataFrame, date_range, read_stata
import pandas.util.testing as tm
-from ..pandas_vb_common import BaseIO, setup # noqa
+from ..pandas_vb_common import BaseIO
class Stata(BaseIO):
@@ -35,3 +35,6 @@ def time_read_stata(self, convert_dates):
def time_write_stata(self, convert_dates):
self.df.to_stata(self.fname, self.convert_dates)
+
+
+from ..pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py
index 6624c3d0aaf49..57811dec8cd29 100644
--- a/asv_bench/benchmarks/join_merge.py
+++ b/asv_bench/benchmarks/join_merge.py
@@ -11,7 +11,7 @@
except ImportError:
from pandas import ordered_merge as merge_ordered
-from .pandas_vb_common import setup # noqa
+from .pandas_vb_common import Panel
class Append(object):
@@ -361,3 +361,6 @@ def time_series_align_int64_index(self):
def time_series_align_left_monotonic(self):
self.ts1.align(self.ts2, join='left')
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/multiindex_object.py b/asv_bench/benchmarks/multiindex_object.py
index 0c92214795557..eaf2bbbe510c2 100644
--- a/asv_bench/benchmarks/multiindex_object.py
+++ b/asv_bench/benchmarks/multiindex_object.py
@@ -4,8 +4,6 @@
import pandas.util.testing as tm
from pandas import date_range, MultiIndex
-from .pandas_vb_common import setup # noqa
-
class GetLoc(object):
@@ -138,3 +136,6 @@ def time_datetime_level_values_copy(self, mi):
def time_datetime_level_values_sliced(self, mi):
mi[:10].values
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/panel_ctor.py b/asv_bench/benchmarks/panel_ctor.py
index 4614bbd198afa..b87583ef925f3 100644
--- a/asv_bench/benchmarks/panel_ctor.py
+++ b/asv_bench/benchmarks/panel_ctor.py
@@ -3,7 +3,7 @@
from pandas import DataFrame, Panel, DatetimeIndex, date_range
-from .pandas_vb_common import setup # noqa
+from .pandas_vb_common import Panel
class DifferentIndexes(object):
@@ -58,3 +58,6 @@ def setup(self):
def time_from_dict(self):
with warnings.catch_warnings(record=True):
Panel.from_dict(self.data_frames)
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/panel_methods.py b/asv_bench/benchmarks/panel_methods.py
index 4d19e9a87c507..e35455f36ed98 100644
--- a/asv_bench/benchmarks/panel_methods.py
+++ b/asv_bench/benchmarks/panel_methods.py
@@ -3,7 +3,7 @@
import numpy as np
from pandas import Panel
-from .pandas_vb_common import setup # noqa
+from .pandas_vb_common import Panel
class PanelMethods(object):
@@ -23,3 +23,6 @@ def time_pct_change(self, axis):
def time_shift(self, axis):
with warnings.catch_warnings(record=True):
self.panel.shift(1, axis=axis)
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/plotting.py b/asv_bench/benchmarks/plotting.py
index 5b49112b0e07d..68dc39b648152 100644
--- a/asv_bench/benchmarks/plotting.py
+++ b/asv_bench/benchmarks/plotting.py
@@ -7,8 +7,6 @@
import matplotlib
matplotlib.use('Agg')
-from .pandas_vb_common import setup # noqa
-
class Plotting(object):
@@ -62,3 +60,6 @@ def setup(self):
def time_plot_andrews_curves(self):
andrews_curves(self.df, "Name")
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/reindex.py b/asv_bench/benchmarks/reindex.py
index 413427a16f40b..13f2877e6048d 100644
--- a/asv_bench/benchmarks/reindex.py
+++ b/asv_bench/benchmarks/reindex.py
@@ -2,7 +2,7 @@
import pandas.util.testing as tm
from pandas import (DataFrame, Series, DatetimeIndex, MultiIndex, Index,
date_range)
-from .pandas_vb_common import setup, lib # noqa
+from .pandas_vb_common import lib
class Reindex(object):
@@ -170,3 +170,6 @@ def setup(self):
def time_lib_fast_zip(self):
lib.fast_zip(self.col_array_list)
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/replace.py b/asv_bench/benchmarks/replace.py
index 41208125e8f32..3236b09acec37 100644
--- a/asv_bench/benchmarks/replace.py
+++ b/asv_bench/benchmarks/replace.py
@@ -1,8 +1,6 @@
import numpy as np
import pandas as pd
-from .pandas_vb_common import setup # noqa
-
class FillNa(object):
@@ -56,3 +54,6 @@ def setup(self, constructor, replace_data):
def time_replace(self, constructor, replace_data):
self.data.replace(self.to_replace)
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py
index 3cf9a32dab398..3140f6fc81cbb 100644
--- a/asv_bench/benchmarks/reshape.py
+++ b/asv_bench/benchmarks/reshape.py
@@ -5,8 +5,6 @@
from pandas import DataFrame, MultiIndex, date_range, melt, wide_to_long
import pandas as pd
-from .pandas_vb_common import setup # noqa
-
class Melt(object):
@@ -150,3 +148,6 @@ def time_get_dummies_1d(self):
def time_get_dummies_1d_sparse(self):
pd.get_dummies(self.s, sparse=True)
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py
index e3bf551fa5f2b..86294e33e1e06 100644
--- a/asv_bench/benchmarks/rolling.py
+++ b/asv_bench/benchmarks/rolling.py
@@ -1,8 +1,6 @@
import pandas as pd
import numpy as np
-from .pandas_vb_common import setup # noqa
-
class Methods(object):
@@ -77,3 +75,6 @@ def setup(self, constructor, window, dtype, percentile, interpolation):
def time_quantile(self, constructor, window, dtype, percentile,
interpolation):
self.roll.quantile(percentile, interpolation=interpolation)
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py
index a26c5d89bc483..2388acbc2d33f 100644
--- a/asv_bench/benchmarks/series_methods.py
+++ b/asv_bench/benchmarks/series_methods.py
@@ -4,8 +4,6 @@
import pandas.util.testing as tm
from pandas import Series, date_range, NaT
-from .pandas_vb_common import setup # noqa
-
class SeriesConstructor(object):
@@ -192,3 +190,6 @@ def setup(self):
def time_series_datetimeindex_repr(self):
getattr(self.s, 'a', None)
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/sparse.py b/asv_bench/benchmarks/sparse.py
index dcb7694abc2ad..bbc076790a923 100644
--- a/asv_bench/benchmarks/sparse.py
+++ b/asv_bench/benchmarks/sparse.py
@@ -5,8 +5,6 @@
from pandas import (SparseSeries, SparseDataFrame, SparseArray, Series,
date_range, MultiIndex)
-from .pandas_vb_common import setup # noqa
-
def make_array(size, dense_proportion, fill_value, dtype):
dense_size = int(size * dense_proportion)
@@ -160,3 +158,6 @@ def time_addition(self, fill_value):
def time_division(self, fill_value):
self.arr1 / self.arr2
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/stat_ops.py b/asv_bench/benchmarks/stat_ops.py
index ecfcb27806f54..3a6223d283073 100644
--- a/asv_bench/benchmarks/stat_ops.py
+++ b/asv_bench/benchmarks/stat_ops.py
@@ -1,8 +1,6 @@
import numpy as np
import pandas as pd
-from .pandas_vb_common import setup # noqa
-
ops = ['mean', 'sum', 'median', 'std', 'skew', 'kurt', 'mad', 'prod', 'sem',
'var']
@@ -112,3 +110,6 @@ def setup(self, method):
def time_corr(self, method):
self.df.corr(method=method)
+
+
+from .pandas_vb_common import setup # noqa: F401
diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py
index 2557ba7672a0e..11a789453c2df 100644
--- a/asv_bench/benchmarks/timeseries.py
+++ b/asv_bench/benchmarks/timeseries.py
@@ -8,8 +8,6 @@
except ImportError:
from pandas.tseries.converter import DatetimeConverter
-from .pandas_vb_common import setup # noqa
-
class DatetimeIndex(object):
@@ -416,3 +414,6 @@ def time_dt_accessor(self):
def time_dt_accessor_normalize(self):
self.series.dt.normalize()
+
+
+from .pandas_vb_common import setup # noqa: F401
| - [x] closes #22885
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
#22885
Following the suggestions of @datapythonista, I moved all the `setup` imports for the benchmark files to the end of each file and added a `# noqa: F401` like so:
`from .pandas_vb_common import setup # noqa: F401`
Now, `flake8` only returns the following two errors in `asv_bench`:
```
./benchmarks/strings.py:95:9: E731 do not assign a lambda expression, use a def
./benchmarks/io/excel.py:6:1: F401 '..pandas_vb_common.BaseIO' imported but unused
```
These may be expected, but I wanted to mention them here just in case they need addressing.
Also, running `pytest`, I got this error:
```
usage: pytest [options] [file_or_dir] [file_or_dir] [...]
pytest: error: unrecognized arguments: --strict-data-files
inifile: /pandas/setup.cfg
rootdir: /pandas
```
I found a discussion on it at #22700, but there didn't seem to be a definitive solution. Open to guidance on this.
Please let me know if any corrections are needed. Thanks! | https://api.github.com/repos/pandas-dev/pandas/pulls/22947 | 2018-10-02T16:17:35Z | 2018-10-10T17:00:48Z | 2018-10-10T17:00:47Z | 2018-10-10T17:24:29Z |
Warn when column order changed. | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 073ed8a082a11..e1d2caf4d08de 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -2477,6 +2477,7 @@ Exceptions and warnings
.. autosummary::
:toctree: generated/
+ errors.ColumnOrderWarning
errors.DtypeWarning
errors.EmptyDataError
errors.OutOfBoundsDatetime
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index efa52a6f7cfe2..c4eb5ae2c6b84 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -97,6 +97,9 @@ Series can be instantiated from dicts:
If you're using Python < 3.6 or Pandas < 0.23, and an index is not passed,
the ``Series`` index will be the lexically ordered list of dict keys.
+ See :class:`pandas.errors.ColumnOrderWarning` for help with upgrading
+ from pandas 0.22 with code that may rely on the old sorting behavior.
+
In the example above, if you were on a Python version lower than 3.6 or a
Pandas version lower than 0.23, the ``Series`` would be ordered by the lexical
order of the dict keys (i.e. ``['a', 'b', 'c']`` rather than ``['b', 'a', 'c']``).
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 14e47936e1b50..608deea3a576c 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -8,12 +8,14 @@
from functools import partial
import inspect
import collections
+import warnings
import numpy as np
from pandas._libs import lib, tslibs
from pandas import compat
from pandas.compat import iteritems, PY36, OrderedDict
+from pandas.errors import ColumnOrderWarning
from pandas.core.dtypes.generic import ABCSeries, ABCIndex, ABCIndexClass
from pandas.core.dtypes.common import (
is_integer, is_bool_dtype, is_extension_array_dtype, is_array_like
@@ -225,6 +227,12 @@ def dict_keys_to_ordered_list(mapping):
# can be replaced by a simple list(mapping.keys())
if PY36 or isinstance(mapping, OrderedDict):
keys = list(mapping.keys())
+ if not isinstance(mapping, OrderedDict):
+ sorted_keys = try_sort(mapping)
+ if sorted_keys != keys:
+ warnings.warn(ColumnOrderWarning.message,
+ ColumnOrderWarning,
+ stacklevel=2)
else:
keys = try_sort(mapping)
return keys
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 83f80c305c5eb..e19f4e3d07aac 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -12,6 +12,7 @@
import numpy as np
import numpy.ma as ma
+from pandas.errors import ColumnOrderWarning
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.common import (
@@ -328,6 +329,16 @@ def _init_dict(self, data, index=None, dtype=None):
s = s.sort_index()
except TypeError:
pass
+ if PY36 and not isinstance(data, OrderedDict) and data:
+ # check if sort order changed.
+ try:
+ if not s.index.is_monotonic_increasing:
+ warnings.warn(ColumnOrderWarning.message,
+ ColumnOrderWarning,
+ stacklevel=3)
+ except TypeError:
+ pass
+
return s._data, s.index
@classmethod
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index 147c43b30d45f..8c71d8f6a803e 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -1,10 +1,9 @@
-# flake8: noqa
-
"""
Expose public exceptions & warnings
"""
+import warnings
-from pandas._libs.tslibs import OutOfBoundsDatetime
+from pandas._libs.tslibs import OutOfBoundsDatetime # noqa: F401
class PerformanceWarning(Warning):
@@ -13,6 +12,7 @@ class PerformanceWarning(Warning):
performance impact.
"""
+
class UnsupportedFunctionCall(ValueError):
"""
Exception raised when attempting to call a numpy function
@@ -20,6 +20,7 @@ class UnsupportedFunctionCall(ValueError):
the object e.g. ``np.cumsum(groupby_object)``.
"""
+
class UnsortedIndexError(KeyError):
"""
Error raised when attempting to get a slice of a MultiIndex,
@@ -182,3 +183,44 @@ def __str__(self):
name = self.class_instance.__class__.__name__
msg = "This {methodtype} must be defined in the concrete class {name}"
return (msg.format(methodtype=self.methodtype, name=name))
+
+
+class ColumnOrderWarning(RuntimeWarning):
+ """Potential behavior change from 0.22 to 0.24 due to column ordering.
+
+ Pandas 0.22 respects dictionary order for Python 3.6 in certain places,
+ notably the Series and DataFrame constructors. This can subtly change
+ the behavior of downstream operations that relied on column or index
+ order.
+
+ This warning is ignored by default. To use it, we recommend elevating
+ the warning to be visible or raise when running unit tests.
+
+ .. code-block:: python
+
+ import warnings
+ from pandas.errors import ColumnOrderWarning
+
+ def test_foo():
+ with warnings.filterwarnings("always", ColumnOrderWarning):
+ ...
+
+ Or if using pytest
+
+ .. code-block:: python
+
+ @pytest.mark.filterwarnings("always::ColumnOrderWarning")
+ def test_foo():
+ ...
+ """
+ message = (
+ "Possible ehavior change due to respecting dictionary order. \n\n"
+ "Pandas >=0.22 respects dictionary order for Python >= 3.6. \n"
+ "This may change the behavior of code that relied on dictionary\n "
+ "keys being sorted. \n\n"
+ "See http://pandas.pydata.org/pandas-docs/stable/generated/pandas.errors.ColumnOrderWarning.html " # noqa
+ "for more."
+ )
+
+
+warnings.simplefilter("ignore", ColumnOrderWarning)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 2f1c9e05a01b0..eb5e75d4477e9 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -16,6 +16,7 @@
from pandas.core.dtypes.common import is_integer_dtype
from pandas.compat import (lmap, long, zip, range, lrange, lzip,
OrderedDict, is_platform_little_endian, PY36)
+from pandas.errors import ColumnOrderWarning
from pandas import compat
from pandas import (DataFrame, Index, Series, isna,
MultiIndex, Timedelta, Timestamp,
@@ -348,7 +349,9 @@ def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
- frame = DataFrame(data=d)
+ # by default, no warning
+ with tm.assert_produces_warning(None, filter_level=None):
+ frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@@ -361,6 +364,12 @@ def test_constructor_dict_order_by_values(self):
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
+ @pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
+ @pytest.mark.filterwarnings("always::pandas.errors.ColumnOrderWarning")
+ def test_constructor_dict_order_warns(self):
+ with tm.assert_produces_warning(ColumnOrderWarning):
+ pd.DataFrame({"b": [1], "a": [1]})
+
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 4817f5bdccc29..892b8826c410a 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -18,6 +18,7 @@
from pandas import (Index, Series, isna, date_range, Timestamp,
NaT, period_range, timedelta_range, MultiIndex,
IntervalIndex, Categorical, DataFrame)
+from pandas.errors import ColumnOrderWarning
from pandas._libs import lib
from pandas._libs.tslib import iNaT
@@ -890,11 +891,19 @@ def test_constructor_dict_order(self):
d = {'b': 1, 'a': 0, 'c': 2}
result = Series(d)
if PY36:
- expected = Series([1, 0, 2], index=list('bac'))
+ with tm.assert_produces_warning(None, filter_level=None):
+ # no warning by default
+ expected = Series([1, 0, 2], index=list('bac'))
else:
expected = Series([0, 1, 2], index=list('abc'))
tm.assert_series_equal(result, expected)
+ @pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
+ @pytest.mark.filterwarnings("always::pandas.errors.ColumnOrderWarning")
+ def test_constructor_dict_order_warns(self):
+ with tm.assert_produces_warning(ColumnOrderWarning):
+ pd.Series({"b": 1, "a": 0})
+
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18480
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 3db251e89842d..32eb81ab0c451 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -2561,6 +2561,8 @@ class for all warnings. To check that no warning is returned,
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
+ * None - do not modify the warning filter registry. Useful when
+ testing that a warning is hidden by default.
clear : str, default None
If not ``None`` then remove any previously raised warnings from
@@ -2608,7 +2610,8 @@ class for all warnings. To check that no warning is returned,
pass
saw_warning = False
- warnings.simplefilter(filter_level)
+ if filter_level:
+ warnings.simplefilter(filter_level)
yield w
extra_warnings = []
| Warn when column order has changed from 0.22
```python
In [1]: import pandas as pd; import warnings
In [2]: df = pd.DataFrame({"b": [1, 2], "a": [3, 4]})
In [3]: warnings.filters.pop(0)
Out[3]: ('ignore', None, pandas.errors.ColumnOrderWarning, None, 0)
In [4]: df = pd.DataFrame({"b": [1, 2], "a": [3, 4]})
/Users/taugspurger/sandbox/pandas/pandas/core/frame.py:494: ColumnOrderWarning: Possible behavior change due to respecting dictionary order.
Pandas >=0.22 respects dictionary order for Python >= 3.6.
This may change the behavior of code that relied on dictionary
keys being sorted.
See http://pandas.pydata.org/pandas-docs/stable/generated/pandas.errors.ColumnOrderWarning.html for more.
keys = com.dict_keys_to_ordered_list(data)
```
Closes https://github.com/pandas-dev/pandas/issues/22709
---
cc @topper-123 @jorisvandenbossche.
Long-term, what's the plan here? I assume at some point we'll want to remove the checks for whether the keys differ. Do we need a deprecation warning for the warning? :)
I've covered the Series and DataFrame constructors. Are there other places I should be looking? | https://api.github.com/repos/pandas-dev/pandas/pulls/22945 | 2018-10-02T13:41:07Z | 2019-03-25T23:59:21Z | null | 2019-03-25T23:59:21Z |
DOC: Updating DataFrame.to_stata docstring | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b4e8b4e3a6bec..fbe17c9669f0b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1820,12 +1820,16 @@ def to_stata(self, fname, convert_dates=None, write_index=True,
data_label=None, variable_labels=None, version=114,
convert_strl=None):
"""
- Export Stata binary dta files.
+ Transform the Dataframe to a Stata dataset.
+
+ This functions writes the Dataframe to a Stata dataset file.
+ "dta" files contain a Stata dataset. Stata is a software for statistics
+ and data science.
Parameters
----------
fname : path (string), buffer or path object
- string, path object (pathlib.Path or py._path.local.LocalPath) or
+ String, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() functions. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
| Fixes #22896 | https://api.github.com/repos/pandas-dev/pandas/pulls/22943 | 2018-10-02T11:54:05Z | 2018-10-02T12:11:25Z | null | 2018-10-04T02:40:24Z |
BUG: Correctly weekly resample over DST | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 851c1a3fbd6e9..a64576565d093 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -794,6 +794,7 @@ Groupby/Resample/Rolling
- Bug in :meth:`Resampler.asfreq` when frequency of ``TimedeltaIndex`` is a subperiod of a new frequency (:issue:`13022`).
- Bug in :meth:`SeriesGroupBy.mean` when values were integral but could not fit inside of int64, overflowing instead. (:issue:`22487`)
- :func:`RollingGroupby.agg` and :func:`ExpandingGroupby.agg` now support multiple aggregation functions as parameters (:issue:`15072`)
+- Bug in :meth:`DataFrame.resample` and :meth:`Series.resample` when resampling by a weekly offset (``'W'``) across a DST transition (:issue:`9119`, :issue:`21459`)
Sparse
^^^^^^
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 878ac957a8557..70a8deb33b7f2 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -16,7 +16,8 @@
from pandas.tseries.frequencies import to_offset, is_subperiod, is_superperiod
from pandas.core.indexes.datetimes import DatetimeIndex, date_range
from pandas.core.indexes.timedeltas import TimedeltaIndex
-from pandas.tseries.offsets import DateOffset, Tick, Day, delta_to_nanoseconds
+from pandas.tseries.offsets import (DateOffset, Tick, Day,
+ delta_to_nanoseconds, Nano)
from pandas.core.indexes.period import PeriodIndex
from pandas.errors import AbstractMethodError
import pandas.core.algorithms as algos
@@ -1395,18 +1396,21 @@ def _get_time_bins(self, ax):
def _adjust_bin_edges(self, binner, ax_values):
# Some hacks for > daily data, see #1471, #1458, #1483
- bin_edges = binner.asi8
-
if self.freq != 'D' and is_superperiod(self.freq, 'D'):
- day_nanos = delta_to_nanoseconds(timedelta(1))
if self.closed == 'right':
- bin_edges = bin_edges + day_nanos - 1
+ # GH 21459, GH 9119: Adjust the bins relative to the wall time
+ bin_edges = binner.tz_localize(None)
+ bin_edges = bin_edges + timedelta(1) - Nano(1)
+ bin_edges = bin_edges.tz_localize(binner.tz).asi8
+ else:
+ bin_edges = binner.asi8
# intraday values on last day
if bin_edges[-2] > ax_values.max():
bin_edges = bin_edges[:-1]
binner = binner[:-1]
-
+ else:
+ bin_edges = binner.asi8
return binner, bin_edges
def _get_time_delta_bins(self, ax):
diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py
index ccd2461d1512e..5cd31e08e0a9b 100644
--- a/pandas/tests/test_resample.py
+++ b/pandas/tests/test_resample.py
@@ -2114,6 +2114,28 @@ def test_downsample_across_dst(self):
freq='H'))
tm.assert_series_equal(result, expected)
+ def test_downsample_across_dst_weekly(self):
+ # GH 9119, GH 21459
+ df = DataFrame(index=DatetimeIndex([
+ '2017-03-25', '2017-03-26', '2017-03-27',
+ '2017-03-28', '2017-03-29'
+ ], tz='Europe/Amsterdam'),
+ data=[11, 12, 13, 14, 15])
+ result = df.resample('1W').sum()
+ expected = DataFrame([23, 42], index=pd.DatetimeIndex([
+ '2017-03-26', '2017-04-02'
+ ], tz='Europe/Amsterdam'))
+ tm.assert_frame_equal(result, expected)
+
+ idx = pd.date_range("2013-04-01", "2013-05-01", tz='Europe/London',
+ freq='H')
+ s = Series(index=idx)
+ result = s.resample('W').mean()
+ expected = Series(index=pd.date_range(
+ '2013-04-07', freq='W', periods=5, tz='Europe/London'
+ ))
+ tm.assert_series_equal(result, expected)
+
def test_resample_with_nat(self):
# GH 13020
index = DatetimeIndex([pd.NaT,
| - [x] closes #21459
- [x] closes #9119
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22941 | 2018-10-02T07:06:44Z | 2018-10-03T15:25:46Z | 2018-10-03T15:25:45Z | 2018-10-03T15:25:55Z |
Analytics.py fixtures added | diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index baebf414969be..e159a044a70aa 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -32,45 +32,45 @@ class TestDataFrameAnalytics(TestData):
# Correlation and covariance
@td.skip_if_no_scipy
- def test_corr_pearson(self):
- self.frame['A'][:5] = nan
- self.frame['B'][5:10] = nan
+ def test_corr_pearson(self, float_frame):
+ float_frame['A'][:5] = nan
+ float_frame['B'][5:10] = nan
self._check_method('pearson')
@td.skip_if_no_scipy
- def test_corr_kendall(self):
- self.frame['A'][:5] = nan
- self.frame['B'][5:10] = nan
+ def test_corr_kendall(self, float_frame):
+ float_frame['A'][:5] = nan
+ float_frame['B'][5:10] = nan
self._check_method('kendall')
@td.skip_if_no_scipy
- def test_corr_spearman(self):
- self.frame['A'][:5] = nan
- self.frame['B'][5:10] = nan
+ def test_corr_spearman(self, float_frame):
+ float_frame['A'][:5] = nan
+ float_frame['B'][5:10] = nan
self._check_method('spearman')
- def _check_method(self, method='pearson', check_minp=False):
+ def _check_method(self, float_frame, method='pearson', check_minp=False):
if not check_minp:
- correls = self.frame.corr(method=method)
- exp = self.frame['A'].corr(self.frame['C'], method=method)
+ correls = float_frame.corr(method=method)
+ exp = float_frame['A'].corr(float_frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], exp)
else:
- result = self.frame.corr(min_periods=len(self.frame) - 8)
- expected = self.frame.corr()
+ result = float_frame.corr(min_periods=len(float_frame) - 8)
+ expected = float_frame.corr()
expected.loc['A', 'B'] = expected.loc['B', 'A'] = nan
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
- def test_corr_non_numeric(self):
- self.frame['A'][:5] = nan
- self.frame['B'][5:10] = nan
+ def test_corr_non_numeric(self, float_frame, float_string_frame):
+ float_frame['A'][:5] = nan
+ float_frame['B'][5:10] = nan
# exclude non-numeric types
- result = self.mixed_frame.corr()
- expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
+ result = float_string_frame.corr()
+ expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@@ -138,36 +138,36 @@ def test_corr_invalid_method(self):
with tm.assert_raises_regex(ValueError, msg):
df.corr(method="____")
- def test_cov(self):
+ def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
- expected = self.frame.cov()
- result = self.frame.cov(min_periods=len(self.frame))
+ expected = float_frame.cov()
+ result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
- result = self.frame.cov(min_periods=len(self.frame) + 1)
+ result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
- frame = self.frame.copy()
+ frame = float_frame.copy()
frame['A'][:5] = nan
frame['B'][5:10] = nan
- result = self.frame.cov(min_periods=len(self.frame) - 8)
- expected = self.frame.cov()
+ result = float_frame.cov(min_periods=len(float_frame) - 8)
+ expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
- self.frame['A'][:5] = nan
- self.frame['B'][:10] = nan
- cov = self.frame.cov()
+ float_frame['A'][:5] = nan
+ float_frame['B'][:10] = nan
+ cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
- self.frame['A'].cov(self.frame['C']))
+ float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
- result = self.mixed_frame.cov()
- expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
+ result = float_string_frame.cov()
+ expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
@@ -182,11 +182,11 @@ def test_cov(self):
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
- def test_corrwith(self):
- a = self.tsframe
+ def test_corrwith(self, datetime_frame):
+ a = datetime_frame
noise = Series(randn(len(a)), index=a.index)
- b = self.tsframe.add(noise, axis=0)
+ b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
@@ -231,9 +231,9 @@ def test_corrwith_with_objects(self):
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
- def test_corrwith_series(self):
- result = self.tsframe.corrwith(self.tsframe['A'])
- expected = self.tsframe.apply(self.tsframe['A'].corr)
+ def test_corrwith_series(self, datetime_frame):
+ result = datetime_frame.corrwith(datetime_frame['A'])
+ expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
@@ -507,13 +507,13 @@ def test_nunique(self):
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
- def test_sum(self):
+ def test_sum(self, mixed_float_frame):
self._check_stat_op('sum', np.sum, has_numeric_only=True,
skipna_alternative=np.nansum)
# mixed types (with upcasting happening)
self._check_stat_op('sum', np.sum,
- frame=self.mixed_float.astype('float32'),
+ frame=mixed_float_frame.astype('float32'),
has_numeric_only=True, check_dtype=False,
check_less_precise=True)
@@ -559,25 +559,25 @@ def wrapper(x):
self._check_stat_op('median', wrapper, check_dates=True)
- def test_min(self):
+ def test_min(self, int_frame):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
self._check_stat_op('min', np.min, check_dates=True)
- self._check_stat_op('min', np.min, frame=self.intframe)
+ self._check_stat_op('min', np.min, frame=int_frame)
- def test_cummin(self):
- self.tsframe.loc[5:10, 0] = nan
- self.tsframe.loc[10:15, 1] = nan
- self.tsframe.loc[15:, 2] = nan
+ def test_cummin(self, datetime_frame):
+ datetime_frame.loc[5:10, 0] = nan
+ datetime_frame.loc[10:15, 1] = nan
+ datetime_frame.loc[15:, 2] = nan
# axis = 0
- cummin = self.tsframe.cummin()
- expected = self.tsframe.apply(Series.cummin)
+ cummin = datetime_frame.cummin()
+ expected = datetime_frame.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
- cummin = self.tsframe.cummin(axis=1)
- expected = self.tsframe.apply(Series.cummin, axis=1)
+ cummin = datetime_frame.cummin(axis=1)
+ expected = datetime_frame.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
@@ -585,22 +585,22 @@ def test_cummin(self):
result = df.cummin() # noqa
# fix issue
- cummin_xs = self.tsframe.cummin(axis=1)
- assert np.shape(cummin_xs) == np.shape(self.tsframe)
+ cummin_xs = datetime_frame.cummin(axis=1)
+ assert np.shape(cummin_xs) == np.shape(datetime_frame)
- def test_cummax(self):
- self.tsframe.loc[5:10, 0] = nan
- self.tsframe.loc[10:15, 1] = nan
- self.tsframe.loc[15:, 2] = nan
+ def test_cummax(self, datetime_frame):
+ datetime_frame.loc[5:10, 0] = nan
+ datetime_frame.loc[10:15, 1] = nan
+ datetime_frame.loc[15:, 2] = nan
# axis = 0
- cummax = self.tsframe.cummax()
- expected = self.tsframe.apply(Series.cummax)
+ cummax = datetime_frame.cummax()
+ expected = datetime_frame.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
- cummax = self.tsframe.cummax(axis=1)
- expected = self.tsframe.apply(Series.cummax, axis=1)
+ cummax = datetime_frame.cummax(axis=1)
+ expected = datetime_frame.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
@@ -608,32 +608,32 @@ def test_cummax(self):
result = df.cummax() # noqa
# fix issue
- cummax_xs = self.tsframe.cummax(axis=1)
- assert np.shape(cummax_xs) == np.shape(self.tsframe)
+ cummax_xs = datetime_frame.cummax(axis=1)
+ assert np.shape(cummax_xs) == np.shape(datetime_frame)
- def test_max(self):
+ def test_max(self, int_frame):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
self._check_stat_op('max', np.max, check_dates=True)
- self._check_stat_op('max', np.max, frame=self.intframe)
+ self._check_stat_op('max', np.max, frame=int_frame)
def test_mad(self):
f = lambda x: np.abs(x - x.mean()).mean()
self._check_stat_op('mad', f)
- def test_var_std(self):
+ def test_var_std(self, datetime_frame):
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
- result = self.tsframe.std(ddof=4)
- expected = self.tsframe.apply(lambda x: x.std(ddof=4))
+ result = datetime_frame.std(ddof=4)
+ expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
- result = self.tsframe.var(ddof=4)
- expected = self.tsframe.apply(lambda x: x.var(ddof=4))
+ result = datetime_frame.var(ddof=4)
+ expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
@@ -685,19 +685,19 @@ def test_mixed_ops(self, op):
result = getattr(df, op)()
assert len(result) == 2
- def test_cumsum(self):
- self.tsframe.loc[5:10, 0] = nan
- self.tsframe.loc[10:15, 1] = nan
- self.tsframe.loc[15:, 2] = nan
+ def test_cumsum(self, datetime_frame):
+ datetime_frame.loc[5:10, 0] = nan
+ datetime_frame.loc[10:15, 1] = nan
+ datetime_frame.loc[15:, 2] = nan
# axis = 0
- cumsum = self.tsframe.cumsum()
- expected = self.tsframe.apply(Series.cumsum)
+ cumsum = datetime_frame.cumsum()
+ expected = datetime_frame.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
- cumsum = self.tsframe.cumsum(axis=1)
- expected = self.tsframe.apply(Series.cumsum, axis=1)
+ cumsum = datetime_frame.cumsum(axis=1)
+ expected = datetime_frame.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
@@ -705,44 +705,44 @@ def test_cumsum(self):
result = df.cumsum() # noqa
# fix issue
- cumsum_xs = self.tsframe.cumsum(axis=1)
- assert np.shape(cumsum_xs) == np.shape(self.tsframe)
+ cumsum_xs = datetime_frame.cumsum(axis=1)
+ assert np.shape(cumsum_xs) == np.shape(datetime_frame)
- def test_cumprod(self):
- self.tsframe.loc[5:10, 0] = nan
- self.tsframe.loc[10:15, 1] = nan
- self.tsframe.loc[15:, 2] = nan
+ def test_cumprod(self, datetime_frame):
+ datetime_frame.loc[5:10, 0] = nan
+ datetime_frame.loc[10:15, 1] = nan
+ datetime_frame.loc[15:, 2] = nan
# axis = 0
- cumprod = self.tsframe.cumprod()
- expected = self.tsframe.apply(Series.cumprod)
+ cumprod = datetime_frame.cumprod()
+ expected = datetime_frame.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
- cumprod = self.tsframe.cumprod(axis=1)
- expected = self.tsframe.apply(Series.cumprod, axis=1)
+ cumprod = datetime_frame.cumprod(axis=1)
+ expected = datetime_frame.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
- cumprod_xs = self.tsframe.cumprod(axis=1)
- assert np.shape(cumprod_xs) == np.shape(self.tsframe)
+ cumprod_xs = datetime_frame.cumprod(axis=1)
+ assert np.shape(cumprod_xs) == np.shape(datetime_frame)
# ints
- df = self.tsframe.fillna(0).astype(int)
+ df = datetime_frame.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
- df = self.tsframe.fillna(0).astype(np.int32)
+ df = datetime_frame.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
- def test_sem(self):
+ def test_sem(self, datetime_frame):
alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
- result = self.tsframe.sem(ddof=4)
- expected = self.tsframe.apply(
+ result = datetime_frame.sem(ddof=4)
+ expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
@@ -788,12 +788,13 @@ def alt(x):
assert kurt.name is None
assert kurt2.name == 'bar'
- def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
+ def _check_stat_op(self, name, alternative, float_frame,
+ float_string_frame, frame=None, has_skipna=True,
has_numeric_only=False, check_dtype=True,
check_dates=False, check_less_precise=False,
skipna_alternative=None):
if frame is None:
- frame = self.frame
+ frame = float_frame
# set some NAs
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
@@ -853,18 +854,18 @@ def wrapper(x):
# bad axis
tm.assert_raises_regex(ValueError, 'No axis named 2', f, axis=2)
# make sure works on mixed-type frame
- getattr(self.mixed_frame, name)(axis=0)
- getattr(self.mixed_frame, name)(axis=1)
+ getattr(float_string_frame, name)(axis=0)
+ getattr(float_string_frame, name)(axis=1)
if has_numeric_only:
- getattr(self.mixed_frame, name)(axis=0, numeric_only=True)
- getattr(self.mixed_frame, name)(axis=1, numeric_only=True)
- getattr(self.frame, name)(axis=0, numeric_only=False)
- getattr(self.frame, name)(axis=1, numeric_only=False)
+ getattr(float_string_frame, name)(axis=0, numeric_only=True)
+ getattr(float_string_frame, name)(axis=1, numeric_only=True)
+ getattr(float_frame, name)(axis=0, numeric_only=False)
+ getattr(float_frame, name)(axis=1, numeric_only=False)
# all NA case
if has_skipna:
- all_na = self.frame * np.NaN
+ all_na = float_frame * np.NaN
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
if name in ['sum', 'prod']:
@@ -1022,9 +1023,9 @@ def test_operators_timedelta64(self):
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
- def test_sum_corner(self):
- axis0 = self.empty.sum(0)
- axis1 = self.empty.sum(1)
+ def test_sum_corner(self, empty_frame):
+ axis0 = empty_frame.sum(0)
+ axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
@@ -1090,59 +1091,59 @@ def test_sum_nanops_timedelta(self):
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
- def test_sum_object(self):
- values = self.frame.values.astype(int)
- frame = DataFrame(values, index=self.frame.index,
- columns=self.frame.columns)
+ def test_sum_object(self, float_frame):
+ values = float_frame.values.astype(int)
+ frame = DataFrame(values, index=float_frame.index,
+ columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
- def test_sum_bool(self):
+ def test_sum_bool(self, float_frame):
# ensure this works, bug report
- bools = np.isnan(self.frame)
+ bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
- def test_mean_corner(self):
+ def test_mean_corner(self, float_string_frame, float_frame):
# unit test when have object data
- the_mean = self.mixed_frame.mean(axis=0)
- the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)
+ the_mean = float_string_frame.mean(axis=0)
+ the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
- assert len(the_mean.index) < len(self.mixed_frame.columns)
+ assert len(the_mean.index) < len(float_string_frame.columns)
# xs sum mixed type, just want to know it works...
- the_mean = self.mixed_frame.mean(axis=1)
- the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)
+ the_mean = float_string_frame.mean(axis=1)
+ the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
- self.frame['bool'] = self.frame['A'] > 0
- means = self.frame.mean(0)
- assert means['bool'] == self.frame['bool'].values.mean()
+ float_frame['bool'] = float_frame['A'] > 0
+ means = float_frame.mean(0)
+ assert means['bool'] == float_frame['bool'].values.mean()
- def test_stats_mixed_type(self):
+ def test_stats_mixed_type(self, float_string_frame):
# don't blow up
- self.mixed_frame.std(1)
- self.mixed_frame.var(1)
- self.mixed_frame.mean(1)
- self.mixed_frame.skew(1)
+ float_string_frame.std(1)
+ float_string_frame.var(1)
+ float_string_frame.mean(1)
+ float_string_frame.skew(1)
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
- def test_median_corner(self):
+ def test_median_corner(self, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
- self._check_stat_op('median', wrapper, frame=self.intframe,
+ self._check_stat_op('median', wrapper, frame=int_frame,
check_dtype=False, check_dates=True)
# Miscellanea
- def test_count_objects(self):
- dm = DataFrame(self.mixed_frame._series)
- df = DataFrame(self.mixed_frame._series)
+ def test_count_objects(self, float_string_frame):
+ dm = DataFrame(float_string_frame._series)
+ df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
@@ -1160,13 +1161,13 @@ def test_sum_bools(self):
# Index of max / min
- def test_idxmin(self):
- frame = self.frame
+ def test_idxmin(self, float_frame, int_frame):
+ frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
- for df in [frame, self.intframe]:
+ for df in [frame, int_frame]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis,
skipna=skipna)
@@ -1174,13 +1175,13 @@ def test_idxmin(self):
pytest.raises(ValueError, frame.idxmin, axis=2)
- def test_idxmax(self):
- frame = self.frame
+ def test_idxmax(self, float_frame, int_frame):
+ frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
- for df in [frame, self.intframe]:
+ for df in [frame, int_frame]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmax, axis=axis,
skipna=skipna)
@@ -1325,10 +1326,11 @@ def test_any_all_level_axis_none_raises(self, method):
with tm.assert_raises_regex(ValueError, xpr):
getattr(df, method)(axis=None, level='out')
- def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,
+ def _check_bool_op(self, name, alternative, float_frame,
+ float_string_frame, frame=None, has_skipna=True,
has_bool_only=False):
if frame is None:
- frame = self.frame > 0
+ frame = float_frame > 0
# set some NAs
frame = DataFrame(frame.values.astype(object), frame.index,
frame.columns)
@@ -1368,7 +1370,7 @@ def wrapper(x):
pytest.raises(ValueError, f, axis=2)
# make sure works on mixed-type frame
- mixed = self.mixed_frame
+ mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0
getattr(mixed, name)(axis=0)
getattr(mixed, name)(axis=1)
@@ -1746,34 +1748,34 @@ def test_pct_change(self):
# Clip
- def test_clip(self):
- median = self.frame.median().median()
- original = self.frame.copy()
+ def test_clip(self, float_frame):
+ median = float_frame.median().median()
+ original = float_frame.copy()
- capped = self.frame.clip_upper(median)
+ capped = float_frame.clip_upper(median)
assert not (capped.values > median).any()
- floored = self.frame.clip_lower(median)
+ floored = float_frame.clip_lower(median)
assert not (floored.values < median).any()
- double = self.frame.clip(upper=median, lower=median)
+ double = float_frame.clip(upper=median, lower=median)
assert not (double.values != median).any()
- # Verify that self.frame was not changed inplace
- assert (self.frame.values == original.values).all()
+ # Verify that float_frame was not changed inplace
+ assert (float_frame.values == original.values).all()
def test_inplace_clip(self):
# GH #15388
- median = self.frame.median().median()
- frame_copy = self.frame.copy()
+ median = float_frame.median().median()
+ frame_copy = float_frame.copy()
frame_copy.clip_upper(median, inplace=True)
assert not (frame_copy.values > median).any()
- frame_copy = self.frame.copy()
+ frame_copy = float_frame.copy()
frame_copy.clip_lower(median, inplace=True)
assert not (frame_copy.values < median).any()
- frame_copy = self.frame.copy()
+ frame_copy = float_frame.copy()
frame_copy.clip(upper=median, lower=median, inplace=True)
assert not (frame_copy.values != median).any()
@@ -1839,9 +1841,10 @@ def test_clip_against_series(self, inplace):
(0, [[2., 2., 3.], [4., 5., 6.], [7., 7., 7.]]),
(1, [[2., 3., 4.], [4., 5., 6.], [5., 6., 7.]])
])
- def test_clip_against_list_like(self, inplace, lower, axis, res):
+ def test_clip_against_list_like(self, inplace, lower, axis, res,
+ simple_frame):
# GH #15390
- original = self.simple.copy(deep=True)
+ original = simple_frame.copy(deep=True)
result = original.clip(lower=lower, upper=[5, 6, 7],
axis=axis, inplace=inplace)
@@ -1869,12 +1872,12 @@ def test_clip_against_frame(self, axis):
tm.assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])
tm.assert_frame_equal(clipped_df[mask], df[mask])
- def test_clip_with_na_args(self):
+ def test_clip_with_na_args(self, float_frame):
"""Should process np.nan argument as None """
# GH # 17276
- tm.assert_frame_equal(self.frame.clip(np.nan), self.frame)
- tm.assert_frame_equal(self.frame.clip(upper=np.nan, lower=np.nan),
- self.frame)
+ tm.assert_frame_equal(float_frame.clip(np.nan), float_frame)
+ tm.assert_frame_equal(float_frame.clip(upper=np.nan, lower=np.nan),
+ float_frame)
# GH #19992
df = DataFrame({'col_0': [1, 2, 3], 'col_1': [4, 5, 6],
| - [ ] Analytics.py tests passed
- Analytics.py fixtures added for issue #22471
| https://api.github.com/repos/pandas-dev/pandas/pulls/22940 | 2018-10-02T04:55:28Z | 2018-11-05T18:31:33Z | null | 2018-11-05T18:31:33Z |
ENH: Implement overlaps method for Interval-like | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 073ed8a082a11..ce8e9f737e5af 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -1651,6 +1651,7 @@ IntervalIndex Components
IntervalIndex.get_loc
IntervalIndex.get_indexer
IntervalIndex.set_closed
+ IntervalIndex.overlaps
.. _api.multiindex:
@@ -2037,6 +2038,7 @@ Properties
Interval.mid
Interval.open_left
Interval.open_right
+ Interval.overlaps
Interval.right
Timedelta
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 851c1a3fbd6e9..428278ad2c781 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -194,6 +194,7 @@ Other Enhancements
- :meth:`Index.to_frame` now supports overriding column name(s) (:issue:`22580`).
- New attribute :attr:`__git_version__` will return git commit sha of current build (:issue:`21295`).
- Compatibility with Matplotlib 3.0 (:issue:`22790`).
+- Added :meth:`Interval.overlaps`, :meth:`IntervalArray.overlaps`, and :meth:`IntervalIndex.overlaps` for determining overlaps between interval-like objects (:issue:`21998`)
.. _whatsnew_0240.api_breaking:
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index 82261094022fb..a395fdbabeca2 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -10,6 +10,7 @@ from cython cimport Py_ssize_t
import numpy as np
from numpy cimport ndarray
+from operator import le, lt
cimport util
util.import_array()
@@ -359,6 +360,67 @@ cdef class Interval(IntervalMixin):
self.left // y, self.right // y, closed=self.closed)
return NotImplemented
+ def overlaps(self, other):
+ """
+ Check whether two Interval objects overlap.
+
+ Two intervals overlap if they share a common point, including closed
+ endpoints. Intervals that only have an open endpoint in common do not
+ overlap.
+
+ .. versionadded:: 0.24.0
+
+ Parameters
+ ----------
+ other : Interval
+ The interval to check against for an overlap.
+
+ Returns
+ -------
+ bool
+ ``True`` if the two intervals overlap, else ``False``.
+
+ Examples
+ --------
+ >>> i1 = pd.Interval(0, 2)
+ >>> i2 = pd.Interval(1, 3)
+ >>> i1.overlaps(i2)
+ True
+ >>> i3 = pd.Interval(4, 5)
+ >>> i1.overlaps(i3)
+ False
+
+ Intervals that share closed endpoints overlap:
+
+ >>> i4 = pd.Interval(0, 1, closed='both')
+ >>> i5 = pd.Interval(1, 2, closed='both')
+ >>> i4.overlaps(i5)
+ True
+
+ Intervals that only have an open endpoint in common do not overlap:
+
+ >>> i6 = pd.Interval(1, 2, closed='neither')
+ >>> i4.overlaps(i6)
+ False
+
+ See Also
+ --------
+ IntervalArray.overlaps : The corresponding method for IntervalArray
+ IntervalIndex.overlaps : The corresponding method for IntervalIndex
+ """
+ if not isinstance(other, Interval):
+ msg = '`other` must be an Interval, got {other}'
+ raise TypeError(msg.format(other=type(other).__name__))
+
+ # equality is okay if both endpoints are closed (overlap at a point)
+ op1 = le if (self.closed_left and other.closed_right) else lt
+ op2 = le if (other.closed_left and self.closed_right) else lt
+
+ # overlaps is equivalent negation of two interval being disjoint:
+ # disjoint = (A.left > B.right) or (B.left > A.right)
+ # (simplifying the negation allows this to be done in less operations)
+ return op1(self.left, other.right) and op2(other.left, self.right)
+
@cython.wraparound(False)
@cython.boundscheck(False)
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 621de3ffd4b12..fd3c9c277b397 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -275,6 +275,14 @@ def closed(request):
return request.param
+@pytest.fixture(params=['left', 'right', 'both', 'neither'])
+def other_closed(request):
+ """
+ Secondary closed fixture to allow parametrizing over all pairs of closed
+ """
+ return request.param
+
+
@pytest.fixture(params=[None, np.nan, pd.NaT, float('nan'), np.float('NaN')])
def nulls_fixture(request):
"""
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 90df596b98296..1ac89c0b18462 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -1,6 +1,8 @@
import textwrap
import numpy as np
+from operator import le, lt
+
from pandas._libs.interval import (Interval, IntervalMixin,
intervals_to_interval_bounds)
from pandas.compat import add_metaclass
@@ -27,8 +29,11 @@
_VALID_CLOSED = {'left', 'right', 'both', 'neither'}
_interval_shared_docs = {}
+
+# TODO(jschendel) remove constructor key when IntervalArray is public (GH22860)
_shared_docs_kwargs = dict(
klass='IntervalArray',
+ constructor='pd.core.arrays.IntervalArray',
name=''
)
@@ -1015,6 +1020,67 @@ def repeat(self, repeats, **kwargs):
right_repeat = self.right.repeat(repeats, **kwargs)
return self._shallow_copy(left=left_repeat, right=right_repeat)
+ _interval_shared_docs['overlaps'] = """
+ Check elementwise if an Interval overlaps the values in the %(klass)s.
+
+ Two intervals overlap if they share a common point, including closed
+ endpoints. Intervals that only have an open endpoint in common do not
+ overlap.
+
+ .. versionadded:: 0.24.0
+
+ Parameters
+ ----------
+ other : Interval
+ Interval to check against for an overlap.
+
+ Returns
+ -------
+ ndarray
+ Boolean array positionally indicating where an overlap occurs.
+
+ Examples
+ --------
+ >>> intervals = %(constructor)s.from_tuples([(0, 1), (1, 3), (2, 4)])
+ >>> intervals
+ %(klass)s([(0, 1], (1, 3], (2, 4]],
+ closed='right',
+ dtype='interval[int64]')
+ >>> intervals.overlaps(pd.Interval(0.5, 1.5))
+ array([ True, True, False])
+
+ Intervals that share closed endpoints overlap:
+
+ >>> intervals.overlaps(pd.Interval(1, 3, closed='left'))
+ array([ True, True, True])
+
+ Intervals that only have an open endpoint in common do not overlap:
+
+ >>> intervals.overlaps(pd.Interval(1, 2, closed='right'))
+ array([False, True, False])
+
+ See Also
+ --------
+ Interval.overlaps : Check whether two Interval objects overlap.
+ """
+
+ @Appender(_interval_shared_docs['overlaps'] % _shared_docs_kwargs)
+ def overlaps(self, other):
+ if isinstance(other, (IntervalArray, ABCIntervalIndex)):
+ raise NotImplementedError
+ elif not isinstance(other, Interval):
+ msg = '`other` must be Interval-like, got {other}'
+ raise TypeError(msg.format(other=type(other).__name__))
+
+ # equality is okay if both endpoints are closed (overlap at a point)
+ op1 = le if (self.closed_left and other.closed_right) else lt
+ op2 = le if (other.closed_left and self.closed_right) else lt
+
+ # overlaps is equivalent negation of two interval being disjoint:
+ # disjoint = (A.left > B.right) or (B.left > A.right)
+ # (simplifying the negation allows this to be done in less operations)
+ return op1(self.left, other.right) & op2(other.left, self.right)
+
def maybe_convert_platform_interval(values):
"""
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 4b125580bd7e0..5a058c80d40c8 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -46,8 +46,11 @@
_VALID_CLOSED = {'left', 'right', 'both', 'neither'}
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
+
+# TODO(jschendel) remove constructor key when IntervalArray is public (GH22860)
_index_doc_kwargs.update(
dict(klass='IntervalIndex',
+ constructor='pd.IntervalIndex',
target_klass='IntervalIndex or list of Intervals',
name=textwrap.dedent("""\
name : object, optional
@@ -982,6 +985,10 @@ def equals(self, other):
self.right.equals(other.right) and
self.closed == other.closed)
+ @Appender(_interval_shared_docs['overlaps'] % _index_doc_kwargs)
+ def overlaps(self, other):
+ return self._data.overlaps(other)
+
def _setop(op_name):
def func(self, other):
other = self._as_like_interval_index(other)
diff --git a/pandas/tests/arrays/interval/__init__.py b/pandas/tests/arrays/interval/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/tests/arrays/test_interval.py b/pandas/tests/arrays/interval/test_interval.py
similarity index 100%
rename from pandas/tests/arrays/test_interval.py
rename to pandas/tests/arrays/interval/test_interval.py
index bcf4cea795978..ff69b68f1117c 100644
--- a/pandas/tests/arrays/test_interval.py
+++ b/pandas/tests/arrays/interval/test_interval.py
@@ -1,10 +1,10 @@
# -*- coding: utf-8 -*-
-import pytest
import numpy as np
+import pytest
+import pandas.util.testing as tm
from pandas import Index, IntervalIndex, date_range, timedelta_range
from pandas.core.arrays import IntervalArray
-import pandas.util.testing as tm
@pytest.fixture(params=[
diff --git a/pandas/tests/arrays/interval/test_ops.py b/pandas/tests/arrays/interval/test_ops.py
new file mode 100644
index 0000000000000..7000ff0f0c3f6
--- /dev/null
+++ b/pandas/tests/arrays/interval/test_ops.py
@@ -0,0 +1,82 @@
+"""Tests for Interval-Interval operations, such as overlaps, contains, etc."""
+import numpy as np
+import pytest
+
+import pandas.util.testing as tm
+from pandas import Interval, IntervalIndex, Timedelta, Timestamp
+from pandas.core.arrays import IntervalArray
+
+
+@pytest.fixture(params=[IntervalArray, IntervalIndex])
+def constructor(request):
+ """
+ Fixture for testing both interval container classes.
+ """
+ return request.param
+
+
+@pytest.fixture(params=[
+ (Timedelta('0 days'), Timedelta('1 day')),
+ (Timestamp('2018-01-01'), Timedelta('1 day')),
+ (0, 1)], ids=lambda x: type(x[0]).__name__)
+def start_shift(request):
+ """
+ Fixture for generating intervals of different types from a start value
+ and a shift value that can be added to start to generate an endpoint.
+ """
+ return request.param
+
+
+class TestOverlaps(object):
+
+ def test_overlaps_interval(
+ self, constructor, start_shift, closed, other_closed):
+ start, shift = start_shift
+ interval = Interval(start, start + 3 * shift, other_closed)
+
+ # intervals: identical, nested, spanning, partial, adjacent, disjoint
+ tuples = [(start, start + 3 * shift),
+ (start + shift, start + 2 * shift),
+ (start - shift, start + 4 * shift),
+ (start + 2 * shift, start + 4 * shift),
+ (start + 3 * shift, start + 4 * shift),
+ (start + 4 * shift, start + 5 * shift)]
+ interval_container = constructor.from_tuples(tuples, closed)
+
+ adjacent = (interval.closed_right and interval_container.closed_left)
+ expected = np.array([True, True, True, True, adjacent, False])
+ result = interval_container.overlaps(interval)
+ tm.assert_numpy_array_equal(result, expected)
+
+ @pytest.mark.parametrize('other_constructor', [
+ IntervalArray, IntervalIndex])
+ def test_overlaps_interval_container(self, constructor, other_constructor):
+ # TODO: modify this test when implemented
+ interval_container = constructor.from_breaks(range(5))
+ other_container = other_constructor.from_breaks(range(5))
+ with pytest.raises(NotImplementedError):
+ interval_container.overlaps(other_container)
+
+ def test_overlaps_na(self, constructor, start_shift):
+ """NA values are marked as False"""
+ start, shift = start_shift
+ interval = Interval(start, start + shift)
+
+ tuples = [(start, start + shift),
+ np.nan,
+ (start + 2 * shift, start + 3 * shift)]
+ interval_container = constructor.from_tuples(tuples)
+
+ expected = np.array([True, False, False])
+ result = interval_container.overlaps(interval)
+ tm.assert_numpy_array_equal(result, expected)
+
+ @pytest.mark.parametrize('other', [
+ 10, True, 'foo', Timedelta('1 day'), Timestamp('2018-01-01')],
+ ids=lambda x: type(x).__name__)
+ def test_overlaps_invalid_type(self, constructor, other):
+ interval_container = constructor.from_breaks(range(5))
+ msg = '`other` must be Interval-like, got {other}'.format(
+ other=type(other).__name__)
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_container.overlaps(other)
diff --git a/pandas/tests/scalar/interval/test_ops.py b/pandas/tests/scalar/interval/test_ops.py
new file mode 100644
index 0000000000000..cfd9fc34faeff
--- /dev/null
+++ b/pandas/tests/scalar/interval/test_ops.py
@@ -0,0 +1,61 @@
+"""Tests for Interval-Interval operations, such as overlaps, contains, etc."""
+import pytest
+
+import pandas.util.testing as tm
+from pandas import Interval, Timedelta, Timestamp
+
+
+@pytest.fixture(params=[
+ (Timedelta('0 days'), Timedelta('1 day')),
+ (Timestamp('2018-01-01'), Timedelta('1 day')),
+ (0, 1)], ids=lambda x: type(x[0]).__name__)
+def start_shift(request):
+ """
+ Fixture for generating intervals of types from a start value and a shift
+ value that can be added to start to generate an endpoint
+ """
+ return request.param
+
+
+class TestOverlaps(object):
+
+ def test_overlaps_self(self, start_shift, closed):
+ start, shift = start_shift
+ interval = Interval(start, start + shift, closed)
+ assert interval.overlaps(interval)
+
+ def test_overlaps_nested(self, start_shift, closed, other_closed):
+ start, shift = start_shift
+ interval1 = Interval(start, start + 3 * shift, other_closed)
+ interval2 = Interval(start + shift, start + 2 * shift, closed)
+
+ # nested intervals should always overlap
+ assert interval1.overlaps(interval2)
+
+ def test_overlaps_disjoint(self, start_shift, closed, other_closed):
+ start, shift = start_shift
+ interval1 = Interval(start, start + shift, other_closed)
+ interval2 = Interval(start + 2 * shift, start + 3 * shift, closed)
+
+ # disjoint intervals should never overlap
+ assert not interval1.overlaps(interval2)
+
+ def test_overlaps_endpoint(self, start_shift, closed, other_closed):
+ start, shift = start_shift
+ interval1 = Interval(start, start + shift, other_closed)
+ interval2 = Interval(start + shift, start + 2 * shift, closed)
+
+ # overlap if shared endpoint is closed for both (overlap at a point)
+ result = interval1.overlaps(interval2)
+ expected = interval1.closed_right and interval2.closed_left
+ assert result == expected
+
+ @pytest.mark.parametrize('other', [
+ 10, True, 'foo', Timedelta('1 day'), Timestamp('2018-01-01')],
+ ids=lambda x: type(x).__name__)
+ def test_overlaps_invalid_type(self, other):
+ interval = Interval(0, 1)
+ msg = '`other` must be an Interval, got {other}'.format(
+ other=type(other).__name__)
+ with tm.assert_raises_regex(TypeError, msg):
+ interval.overlaps(other)
| - [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
- xref #21998, #18975
Implements `Interval.overlaps`, `IntervalArray.overlaps`, and `IntervalIndex.overlaps`.
Implementing this so there's an interval method that can be tested in the interval accessor PR (#19502), as there are currently only attributes that can be used by the accessor. Planning to reboot that PR soon after this is merged.
I didn't implement `IntervalArray.overlaps(IntervalArray)`; the PR providing the specs for `overlaps` was recently closed due to being stale, and I don't recall there being a resolution on this behavior.
| https://api.github.com/repos/pandas-dev/pandas/pulls/22939 | 2018-10-02T03:10:44Z | 2018-10-24T12:18:58Z | 2018-10-24T12:18:58Z | 2018-10-25T02:14:40Z |
Update type for PeriodDtype / DatetimeTZDtype / IntervalDtype | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 851c1a3fbd6e9..9808f5d735535 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -505,6 +505,7 @@ ExtensionType Changes
- :meth:`Series.astype` and :meth:`DataFrame.astype` now dispatch to :meth:`ExtensionArray.astype` (:issue:`21185:`).
- Slicing a single row of a ``DataFrame`` with multiple ExtensionArrays of the same type now preserves the dtype, rather than coercing to object (:issue:`22784`)
- Added :meth:`pandas.api.types.register_extension_dtype` to register an extension type with pandas (:issue:`22664`)
+- Updated the ``.type`` attribute for ``PeriodDtype``, ``DatetimeTZDtype``, and ``IntervalDtype`` to be instances of the dtype (``Period``, ``Timestamp``, and ``Interval`` respectively) (:issue:`22938`)
.. _whatsnew_0240.api.incompatibilities:
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index a552251ebbafa..5c9ba921226c0 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -175,7 +175,9 @@ def type(self):
"""The scalar type for the array, e.g. ``int``
It's expected ``ExtensionArray[item]`` returns an instance
- of ``ExtensionDtype.type`` for scalar ``item``.
+ of ``ExtensionDtype.type`` for scalar ``item``, assuming
+ that value is valid (not NA). NA values do not need to be
+ instances of `type`.
"""
raise AbstractMethodError(self)
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index e2b9e246aee50..7c91e62cc9876 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -4,12 +4,13 @@
from pandas.compat import (string_types, text_type, binary_type,
PY3, PY36)
from pandas._libs import algos, lib
-from pandas._libs.tslibs import conversion
+from pandas._libs.tslibs import conversion, Period, Timestamp
+from pandas._libs.interval import Interval
from pandas.core.dtypes.dtypes import (
registry, CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype,
- DatetimeTZDtypeType, PeriodDtype, PeriodDtypeType, IntervalDtype,
- IntervalDtypeType, PandasExtensionDtype, ExtensionDtype,
+ PeriodDtype, IntervalDtype,
+ PandasExtensionDtype, ExtensionDtype,
_pandas_registry)
from pandas.core.dtypes.generic import (
ABCCategorical, ABCPeriodIndex, ABCDatetimeIndex, ABCSeries,
@@ -1905,20 +1906,20 @@ def _get_dtype_type(arr_or_dtype):
elif isinstance(arr_or_dtype, CategoricalDtype):
return CategoricalDtypeType
elif isinstance(arr_or_dtype, DatetimeTZDtype):
- return DatetimeTZDtypeType
+ return Timestamp
elif isinstance(arr_or_dtype, IntervalDtype):
- return IntervalDtypeType
+ return Interval
elif isinstance(arr_or_dtype, PeriodDtype):
- return PeriodDtypeType
+ return Period
elif isinstance(arr_or_dtype, string_types):
if is_categorical_dtype(arr_or_dtype):
return CategoricalDtypeType
elif is_datetime64tz_dtype(arr_or_dtype):
- return DatetimeTZDtypeType
+ return Timestamp
elif is_period_dtype(arr_or_dtype):
- return PeriodDtypeType
+ return Period
elif is_interval_dtype(arr_or_dtype):
- return IntervalDtypeType
+ return Interval
return _get_dtype_type(np.dtype(arr_or_dtype))
try:
return arr_or_dtype.dtype.type
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index d879ded4f0f09..944365232db65 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -4,6 +4,8 @@
import numpy as np
from pandas import compat
from pandas.core.dtypes.generic import ABCIndexClass, ABCCategoricalIndex
+from pandas._libs.tslibs import Period, NaT, Timestamp
+from pandas._libs.interval import Interval
from .base import ExtensionDtype, _DtypeOpsMixin
@@ -469,13 +471,6 @@ def _is_boolean(self):
return is_bool_dtype(self.categories)
-class DatetimeTZDtypeType(type):
- """
- the type of DatetimeTZDtype, this metaclass determines subclass ability
- """
- pass
-
-
class DatetimeTZDtype(PandasExtensionDtype):
"""
@@ -485,7 +480,7 @@ class DatetimeTZDtype(PandasExtensionDtype):
THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of
np.datetime64[ns]
"""
- type = DatetimeTZDtypeType
+ type = Timestamp
kind = 'M'
str = '|M8[ns]'
num = 101
@@ -583,20 +578,13 @@ def __eq__(self, other):
str(self.tz) == str(other.tz))
-class PeriodDtypeType(type):
- """
- the type of PeriodDtype, this metaclass determines subclass ability
- """
- pass
-
-
class PeriodDtype(PandasExtensionDtype):
"""
A Period duck-typed class, suitable for holding a period with freq dtype.
THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.int64.
"""
- type = PeriodDtypeType
+ type = Period
kind = 'O'
str = '|O08'
base = np.dtype('O')
@@ -666,11 +654,15 @@ def construct_from_string(cls, string):
raise TypeError("could not construct PeriodDtype")
def __unicode__(self):
- return "period[{freq}]".format(freq=self.freq.freqstr)
+ return compat.text_type(self.name)
@property
def name(self):
- return str(self)
+ return str("period[{freq}]".format(freq=self.freq.freqstr))
+
+ @property
+ def na_value(self):
+ return NaT
def __hash__(self):
# make myself hashable
@@ -705,13 +697,6 @@ def is_dtype(cls, dtype):
return super(PeriodDtype, cls).is_dtype(dtype)
-class IntervalDtypeType(type):
- """
- the type of IntervalDtype, this metaclass determines subclass ability
- """
- pass
-
-
@register_extension_dtype
class IntervalDtype(PandasExtensionDtype, ExtensionDtype):
"""
@@ -800,7 +785,6 @@ def construct_from_string(cls, string):
@property
def type(self):
- from pandas import Interval
return Interval
def __unicode__(self):
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index a7a9faa9e77eb..f87c51a4ee16b 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -605,15 +605,15 @@ def test__get_dtype_fails(input_param):
(pd.DatetimeIndex([1, 2]), np.datetime64),
(pd.DatetimeIndex([1, 2]).dtype, np.datetime64),
('<M8[ns]', np.datetime64),
- (pd.DatetimeIndex([1, 2], tz='Europe/London'), com.DatetimeTZDtypeType),
+ (pd.DatetimeIndex([1, 2], tz='Europe/London'), pd.Timestamp),
(pd.DatetimeIndex([1, 2], tz='Europe/London').dtype,
- com.DatetimeTZDtypeType),
- ('datetime64[ns, Europe/London]', com.DatetimeTZDtypeType),
+ pd.Timestamp),
+ ('datetime64[ns, Europe/London]', pd.Timestamp),
(pd.SparseSeries([1, 2], dtype='int32'), np.int32),
(pd.SparseSeries([1, 2], dtype='int32').dtype, np.int32),
- (PeriodDtype(freq='D'), com.PeriodDtypeType),
- ('period[D]', com.PeriodDtypeType),
- (IntervalDtype(), com.IntervalDtypeType),
+ (PeriodDtype(freq='D'), pd.Period),
+ ('period[D]', pd.Period),
+ (IntervalDtype(), pd.Interval),
(None, type(None)),
(1, type(None)),
(1.2, type(None)),
| Split from PeriodArray.
Updating the type to be the more correct `Period`.
Also removed the essentially unused `IntervalDtypeType`. If this doesn't break anything, it seems like there are decent chunks of `_get_dtype_type` that aren't being hit, aside from `test__get_dtype_type`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/22938 | 2018-10-02T02:42:42Z | 2018-10-04T11:55:10Z | 2018-10-04T11:55:10Z | 2018-10-04T11:56:37Z |
DOC: fix DataFrame.sample doctests and reformat the docstring | diff --git a/ci/doctests.sh b/ci/doctests.sh
index b3d7f6785815a..16b3430f1e431 100755
--- a/ci/doctests.sh
+++ b/ci/doctests.sh
@@ -35,7 +35,7 @@ if [ "$DOCTEST" ]; then
fi
pytest --doctest-modules -v pandas/core/generic.py \
- -k"-_set_axis_name -_xs -describe -droplevel -groupby -interpolate -pct_change -pipe -reindex -reindex_axis -resample -sample -to_json -transpose -values -xs"
+ -k"-_set_axis_name -_xs -describe -droplevel -groupby -interpolate -pct_change -pipe -reindex -reindex_axis -resample -to_json -transpose -values -xs"
if [ $? -ne "0" ]; then
RET=1
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 393e7caae5fab..38555262885ec 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4326,8 +4326,8 @@ def sample(self, n=None, frac=None, replace=False, weights=None,
Default = 1 if `frac` = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
- replace : boolean, optional
- Sample with or without replacement. Default = False.
+ replace : bool, default False
+ Sample with or without replacement.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
@@ -4340,7 +4340,7 @@ def sample(self, n=None, frac=None, replace=False, weights=None,
being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
- inf and -inf values not allowed.
+ Infinite values not allowed.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
@@ -4350,58 +4350,52 @@ def sample(self, n=None, frac=None, replace=False, weights=None,
Returns
-------
- A new object of same type as caller.
+ Series or DataFrame
+ A new object of same type as caller containing `n` items randomly
+ sampled from the caller object.
- Examples
+ See Also
--------
- Generate an example ``Series`` and ``DataFrame``:
-
- >>> s = pd.Series(np.random.randn(50))
- >>> s.head()
- 0 -0.038497
- 1 1.820773
- 2 -0.972766
- 3 -1.598270
- 4 -1.095526
- dtype: float64
- >>> df = pd.DataFrame(np.random.randn(50, 4), columns=list('ABCD'))
- >>> df.head()
- A B C D
- 0 0.016443 -2.318952 -0.566372 -1.028078
- 1 -1.051921 0.438836 0.658280 -0.175797
- 2 -1.243569 -0.364626 -0.215065 0.057736
- 3 1.768216 0.404512 -0.385604 -1.457834
- 4 1.072446 -1.137172 0.314194 -0.046661
-
- Next extract a random sample from both of these objects...
+ numpy.random.choice: Generates a random sample from a given 1-D numpy
+ array.
- 3 random elements from the ``Series``:
-
- >>> s.sample(n=3)
- 27 -0.994689
- 55 -1.049016
- 67 -0.224565
- dtype: float64
-
- And a random 10% of the ``DataFrame`` with replacement:
-
- >>> df.sample(frac=0.1, replace=True)
- A B C D
- 35 1.981780 0.142106 1.817165 -0.290805
- 49 -1.336199 -0.448634 -0.789640 0.217116
- 40 0.823173 -0.078816 1.009536 1.015108
- 15 1.421154 -0.055301 -1.922594 -0.019696
- 6 -0.148339 0.832938 1.787600 -1.383767
-
- You can use `random state` for reproducibility:
-
- >>> df.sample(random_state=1)
- A B C D
- 37 -2.027662 0.103611 0.237496 -0.165867
- 43 -0.259323 -0.583426 1.516140 -0.479118
- 12 -1.686325 -0.579510 0.985195 -0.460286
- 8 1.167946 0.429082 1.215742 -1.636041
- 9 1.197475 -0.864188 1.554031 -1.505264
+ Examples
+ --------
+ >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
+ ... 'num_wings': [2, 0, 0, 0],
+ ... 'num_specimen_seen': [10, 2, 1, 8]},
+ ... index=['falcon', 'dog', 'spider', 'fish'])
+ >>> df
+ num_legs num_wings num_specimen_seen
+ falcon 2 2 10
+ dog 4 0 2
+ spider 8 0 1
+ fish 0 0 8
+
+ Extract 3 random elements from the ``Series`` ``df['num_legs']``:
+ Note that we use `random_state` to ensure the reproducibility of
+ the examples.
+
+ >>> df['num_legs'].sample(n=3, random_state=1)
+ fish 0
+ spider 8
+ falcon 2
+ Name: num_legs, dtype: int64
+
+ A random 50% sample of the ``DataFrame`` with replacement:
+
+ >>> df.sample(frac=0.5, replace=True, random_state=1)
+ num_legs num_wings num_specimen_seen
+ dog 4 0 2
+ fish 0 0 8
+
+ Using a DataFrame column as weights. Rows with larger value in the
+ `num_specimen_seen` column are more likely to be sampled.
+
+ >>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
+ num_legs num_wings num_specimen_seen
+ falcon 2 2 10
+ fish 0 0 8
"""
if axis is None:
| - [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Based on #22459. Fix the docstring for DataFrame.sample. I also updated `ci/doctests.sh`.
I was on the fence if I should remove mentions of `Panels` since it is deprecated. | https://api.github.com/repos/pandas-dev/pandas/pulls/22937 | 2018-10-02T02:29:06Z | 2018-10-08T04:31:21Z | 2018-10-08T04:31:21Z | 2018-10-09T14:39:27Z |
Catch Exception in combine | diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 7bf13fb2fecc0..f7c4ee35adfe4 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -739,14 +739,22 @@ def _create_method(cls, op, coerce_to_dtype=True):
----------
op : function
An operator that takes arguments op(a, b)
- coerce_to_dtype : bool
+ coerce_to_dtype : bool, default True
boolean indicating whether to attempt to convert
- the result to the underlying ExtensionArray dtype
- (default True)
+ the result to the underlying ExtensionArray dtype.
+ If it's not possible to create a new ExtensionArray with the
+ values, an ndarray is returned instead.
Returns
-------
- A method that can be bound to a method of a class
+ Callable[[Any, Any], Union[ndarray, ExtensionArray]]
+ A method that can be bound to a class. When used, the method
+ receives the two arguments, one of which is the instance of
+ this class, and should return an ExtensionArray or an ndarray.
+
+ Returning an ndarray may be necessary when the result of the
+ `op` cannot be stored in the ExtensionArray. The dtype of the
+ ndarray uses NumPy's normal inference rules.
Example
-------
@@ -757,7 +765,6 @@ def _create_method(cls, op, coerce_to_dtype=True):
in the class definition of MyExtensionArray to create the operator
for addition, that will be based on the operator implementation
of the underlying elements of the ExtensionArray
-
"""
def _binop(self, other):
@@ -777,8 +784,13 @@ def convert_values(param):
if coerce_to_dtype:
try:
res = self._from_sequence(res)
- except TypeError:
- pass
+ except Exception:
+ # https://github.com/pandas-dev/pandas/issues/22850
+ # We catch all regular exceptions here, and fall back
+ # to an ndarray.
+ res = np.asarray(res)
+ else:
+ res = np.asarray(res)
return res
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 82198c2b3edd5..2e22e4e6e1bfc 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2323,10 +2323,14 @@ def combine(self, other, func, fill_value=None):
pass
elif is_extension_array_dtype(self.values):
# The function can return something of any type, so check
- # if the type is compatible with the calling EA
+ # if the type is compatible with the calling EA.
try:
new_values = self._values._from_sequence(new_values)
- except TypeError:
+ except Exception:
+ # https://github.com/pandas-dev/pandas/issues/22850
+ # pandas has no control over what 3rd-party ExtensionArrays
+ # do in _values_from_sequence. We still want ops to work
+ # though, so we catch any regular Exception.
pass
return self._constructor(new_values, index=new_index, name=new_name)
diff --git a/pandas/tests/extension/decimal/__init__.py b/pandas/tests/extension/decimal/__init__.py
index e69de29bb2d1d..c37aad0af8407 100644
--- a/pandas/tests/extension/decimal/__init__.py
+++ b/pandas/tests/extension/decimal/__init__.py
@@ -0,0 +1,4 @@
+from .array import DecimalArray, DecimalDtype, to_decimal, make_data
+
+
+__all__ = ['DecimalArray', 'DecimalDtype', 'to_decimal', 'make_data']
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 387942234e6fd..79e1a692f744a 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -1,5 +1,6 @@
import decimal
import numbers
+import random
import sys
import numpy as np
@@ -138,5 +139,13 @@ def _concat_same_type(cls, to_concat):
return cls(np.concatenate([x._data for x in to_concat]))
+def to_decimal(values, context=None):
+ return DecimalArray([decimal.Decimal(x) for x in values], context=context)
+
+
+def make_data():
+ return [decimal.Decimal(random.random()) for _ in range(100)]
+
+
DecimalArray._add_arithmetic_ops()
DecimalArray._add_comparison_ops()
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 93b8ea786ef5b..dd625d6e1eb3c 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -1,6 +1,6 @@
+import operator
import decimal
-import random
import numpy as np
import pandas as pd
import pandas.util.testing as tm
@@ -8,11 +8,7 @@
from pandas.tests.extension import base
-from .array import DecimalDtype, DecimalArray
-
-
-def make_data():
- return [decimal.Decimal(random.random()) for _ in range(100)]
+from .array import DecimalDtype, DecimalArray, make_data
@pytest.fixture
@@ -275,3 +271,47 @@ def test_compare_array(self, data, all_compare_operators):
other = pd.Series(data) * [decimal.Decimal(pow(2.0, i))
for i in alter]
self._compare_other(s, data, op_name, other)
+
+
+class DecimalArrayWithoutFromSequence(DecimalArray):
+ """Helper class for testing error handling in _from_sequence."""
+ def _from_sequence(cls, scalars, dtype=None, copy=False):
+ raise KeyError("For the test")
+
+
+class DecimalArrayWithoutCoercion(DecimalArrayWithoutFromSequence):
+ @classmethod
+ def _create_arithmetic_method(cls, op):
+ return cls._create_method(op, coerce_to_dtype=False)
+
+
+DecimalArrayWithoutCoercion._add_arithmetic_ops()
+
+
+def test_combine_from_sequence_raises():
+ # https://github.com/pandas-dev/pandas/issues/22850
+ ser = pd.Series(DecimalArrayWithoutFromSequence([
+ decimal.Decimal("1.0"),
+ decimal.Decimal("2.0")
+ ]))
+ result = ser.combine(ser, operator.add)
+
+ # note: object dtype
+ expected = pd.Series([decimal.Decimal("2.0"),
+ decimal.Decimal("4.0")], dtype="object")
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("class_", [DecimalArrayWithoutFromSequence,
+ DecimalArrayWithoutCoercion])
+def test_scalar_ops_from_sequence_raises(class_):
+ # op(EA, EA) should return an EA, or an ndarray if it's not possible
+ # to return an EA with the return values.
+ arr = class_([
+ decimal.Decimal("1.0"),
+ decimal.Decimal("2.0")
+ ])
+ result = arr + arr
+ expected = np.array([decimal.Decimal("2.0"), decimal.Decimal("4.0")],
+ dtype="object")
+ tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/extension/json/__init__.py b/pandas/tests/extension/json/__init__.py
index e69de29bb2d1d..f2679d087c841 100644
--- a/pandas/tests/extension/json/__init__.py
+++ b/pandas/tests/extension/json/__init__.py
@@ -0,0 +1,3 @@
+from .array import JSONArray, JSONDtype, make_data
+
+__all__ = ['JSONArray', 'JSONDtype', 'make_data']
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 6ce0d63eb63ec..87876d84bef99 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -13,6 +13,8 @@
import collections
import itertools
import numbers
+import random
+import string
import sys
import numpy as np
@@ -179,3 +181,10 @@ def _values_for_argsort(self):
# cast them to an (N, P) array, instead of an (N,) array of tuples.
frozen = [()] + [tuple(x.items()) for x in self]
return np.array(frozen, dtype=object)[1:]
+
+
+def make_data():
+ # TODO: Use a regular dict. See _NDFrameIndexer._setitem_with_indexer
+ return [collections.UserDict([
+ (random.choice(string.ascii_letters), random.randint(0, 100))
+ for _ in range(random.randint(0, 10))]) for _ in range(100)]
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 93f10b7fbfc23..bcbc3e9109182 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -1,7 +1,5 @@
import operator
import collections
-import random
-import string
import pytest
@@ -10,18 +8,11 @@
from pandas.compat import PY2, PY36
from pandas.tests.extension import base
-from .array import JSONArray, JSONDtype
+from .array import JSONArray, JSONDtype, make_data
pytestmark = pytest.mark.skipif(PY2, reason="Py2 doesn't have a UserDict")
-def make_data():
- # TODO: Use a regular dict. See _NDFrameIndexer._setitem_with_indexer
- return [collections.UserDict([
- (random.choice(string.ascii_letters), random.randint(0, 100))
- for _ in range(random.randint(0, 10))]) for _ in range(100)]
-
-
@pytest.fixture
def dtype():
return JSONDtype()
| Closes https://github.com/pandas-dev/pandas/issues/22850 | https://api.github.com/repos/pandas-dev/pandas/pulls/22936 | 2018-10-02T02:27:06Z | 2018-10-04T11:27:55Z | 2018-10-04T11:27:55Z | 2018-10-04T11:32:54Z |
Provide default implementation for `data_repated` | diff --git a/pandas/tests/extension/conftest.py b/pandas/tests/extension/conftest.py
index 4bbbb7df2f399..8e397d228a5b6 100644
--- a/pandas/tests/extension/conftest.py
+++ b/pandas/tests/extension/conftest.py
@@ -31,12 +31,24 @@ def all_data(request, data, data_missing):
@pytest.fixture
-def data_repeated():
- """Return different versions of data for count times"""
+def data_repeated(data):
+ """
+ Generate many datasets.
+
+ Parameters
+ ----------
+ data : fixture implementing `data`
+
+ Returns
+ -------
+ Callable[[int], Generator]:
+ A callable that takes a `count` argument and
+ returns a generator yielding `count` datasets.
+ """
def gen(count):
for _ in range(count):
- yield NotImplementedError
- yield gen
+ yield data
+ return gen
@pytest.fixture
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 03fdd25826b79..93b8ea786ef5b 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -30,14 +30,6 @@ def data_missing():
return DecimalArray([decimal.Decimal('NaN'), decimal.Decimal(1)])
-@pytest.fixture
-def data_repeated():
- def gen(count):
- for _ in range(count):
- yield DecimalArray(make_data())
- yield gen
-
-
@pytest.fixture
def data_for_sorting():
return DecimalArray([decimal.Decimal('1'),
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index 6c6cf80c16da6..ff66f53eab6f6 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -45,15 +45,6 @@ def data_missing():
return Categorical([np.nan, 'A'])
-@pytest.fixture
-def data_repeated():
- """Return different versions of data for count times"""
- def gen(count):
- for _ in range(count):
- yield Categorical(make_data())
- yield gen
-
-
@pytest.fixture
def data_for_sorting():
return Categorical(['A', 'B', 'C'], categories=['C', 'A', 'B'],
diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py
index 57e0922a0b7d9..7aa33006dadda 100644
--- a/pandas/tests/extension/test_integer.py
+++ b/pandas/tests/extension/test_integer.py
@@ -47,14 +47,6 @@ def data_missing(dtype):
return integer_array([np.nan, 1], dtype=dtype)
-@pytest.fixture
-def data_repeated(data):
- def gen(count):
- for _ in range(count):
- yield data
- yield gen
-
-
@pytest.fixture
def data_for_sorting(dtype):
return integer_array([1, 2, 0], dtype=dtype)
diff --git a/pandas/tests/extension/test_interval.py b/pandas/tests/extension/test_interval.py
index 34b98f590df0d..7302c5757d144 100644
--- a/pandas/tests/extension/test_interval.py
+++ b/pandas/tests/extension/test_interval.py
@@ -47,15 +47,6 @@ def data_missing():
return IntervalArray.from_tuples([None, (0, 1)])
-@pytest.fixture
-def data_repeated():
- """Return different versions of data for count times"""
- def gen(count):
- for _ in range(count):
- yield IntervalArray(make_data())
- yield gen
-
-
@pytest.fixture
def data_for_sorting():
return IntervalArray.from_tuples([(1, 2), (2, 3), (0, 1)])
| Split from PeriodArray.
The previous implementation was not great, since failing to override it would result in `Series([NotImplementedError, NotImplementedError...])`. We could raise instead, forcing downstream arrays to implement it, or just repeat `data`. I opted for the latter, which integer_array was already doing. | https://api.github.com/repos/pandas-dev/pandas/pulls/22935 | 2018-10-02T02:15:30Z | 2018-10-02T13:50:42Z | 2018-10-02T13:50:42Z | 2018-10-02T13:50:45Z |
BUG: divmod return type | diff --git a/doc/source/extending.rst b/doc/source/extending.rst
index 9422434a1d998..ab940384594bc 100644
--- a/doc/source/extending.rst
+++ b/doc/source/extending.rst
@@ -160,9 +160,18 @@ your ``MyExtensionArray`` class, as follows:
MyExtensionArray._add_arithmetic_ops()
MyExtensionArray._add_comparison_ops()
-Note that since ``pandas`` automatically calls the underlying operator on each
-element one-by-one, this might not be as performant as implementing your own
-version of the associated operators directly on the ``ExtensionArray``.
+
+.. note::
+
+ Since ``pandas`` automatically calls the underlying operator on each
+ element one-by-one, this might not be as performant as implementing your own
+ version of the associated operators directly on the ``ExtensionArray``.
+
+For arithmetic operations, this implementation will try to reconstruct a new
+``ExtensionArray`` with the result of the element-wise operation. Whether
+or not that succeeds depends on whether the operation returns a result
+that's valid for the ``ExtensionArray``. If an ``ExtensionArray`` cannot
+be reconstructed, an ndarray containing the scalars returned instead.
.. _extending.extension.testing:
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index f7c4ee35adfe4..efe587c6aaaad 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -781,17 +781,24 @@ def convert_values(param):
# a TypeError should be raised
res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]
- if coerce_to_dtype:
- try:
- res = self._from_sequence(res)
- except Exception:
+ def _maybe_convert(arr):
+ if coerce_to_dtype:
# https://github.com/pandas-dev/pandas/issues/22850
# We catch all regular exceptions here, and fall back
# to an ndarray.
- res = np.asarray(res)
+ try:
+ res = self._from_sequence(arr)
+ except Exception:
+ res = np.asarray(arr)
+ else:
+ res = np.asarray(arr)
+ return res
+
+ if op.__name__ in {'divmod', 'rdivmod'}:
+ a, b = zip(*res)
+ res = _maybe_convert(a), _maybe_convert(b)
else:
- res = np.asarray(res)
-
+ res = _maybe_convert(res)
return res
op_name = ops._get_op_name(op, True)
diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py
index ee4a92146128b..36696bc292162 100644
--- a/pandas/tests/extension/base/ops.py
+++ b/pandas/tests/extension/base/ops.py
@@ -58,7 +58,8 @@ def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
s = pd.Series(data)
self.check_opname(s, op_name, s.iloc[0], exc=TypeError)
- @pytest.mark.xfail(run=False, reason="_reduce needs implementation")
+ @pytest.mark.xfail(run=False, reason="_reduce needs implementation",
+ strict=True)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op_name = all_arithmetic_operators
@@ -77,6 +78,10 @@ def test_divmod(self, data):
self._check_divmod_op(s, divmod, 1, exc=TypeError)
self._check_divmod_op(1, ops.rdivmod, s, exc=TypeError)
+ def test_divmod_series_array(self, data):
+ s = pd.Series(data)
+ self._check_divmod_op(s, divmod, data)
+
def test_add_series_with_extension_array(self, data):
s = pd.Series(data)
result = s + data
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index dd625d6e1eb3c..6488c7724229b 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -8,7 +8,7 @@
from pandas.tests.extension import base
-from .array import DecimalDtype, DecimalArray, make_data
+from .array import DecimalDtype, DecimalArray, make_data, to_decimal
@pytest.fixture
@@ -102,7 +102,7 @@ class TestInterface(BaseDecimal, base.BaseInterfaceTests):
class TestConstructors(BaseDecimal, base.BaseConstructorsTests):
- @pytest.mark.xfail(reason="not implemented constructor from dtype")
+ @pytest.mark.skip(reason="not implemented constructor from dtype")
def test_from_dtype(self, data):
# construct from our dtype & string dtype
pass
@@ -240,9 +240,11 @@ def test_arith_series_with_array(self, data, all_arithmetic_operators):
context.traps[decimal.DivisionByZero] = divbyzerotrap
context.traps[decimal.InvalidOperation] = invalidoptrap
- @pytest.mark.skip(reason="divmod not appropriate for decimal")
- def test_divmod(self, data):
- pass
+ def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
+ # We implement divmod
+ super(TestArithmeticOps, self)._check_divmod_op(
+ s, op, other, exc=None
+ )
def test_error(self):
pass
@@ -315,3 +317,21 @@ def test_scalar_ops_from_sequence_raises(class_):
expected = np.array([decimal.Decimal("2.0"), decimal.Decimal("4.0")],
dtype="object")
tm.assert_numpy_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("reverse, expected_div, expected_mod", [
+ (False, [0, 1, 1, 2], [1, 0, 1, 0]),
+ (True, [2, 1, 0, 0], [0, 0, 2, 2]),
+])
+def test_divmod_array(reverse, expected_div, expected_mod):
+ # https://github.com/pandas-dev/pandas/issues/22930
+ arr = to_decimal([1, 2, 3, 4])
+ if reverse:
+ div, mod = divmod(2, arr)
+ else:
+ div, mod = divmod(arr, 2)
+ expected_div = to_decimal(expected_div)
+ expected_mod = to_decimal(expected_mod)
+
+ tm.assert_extension_array_equal(div, expected_div)
+ tm.assert_extension_array_equal(mod, expected_mod)
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index bcbc3e9109182..115afdcc99f2b 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -131,8 +131,7 @@ def test_custom_asserts(self):
class TestConstructors(BaseJSON, base.BaseConstructorsTests):
- # TODO: Should this be pytest.mark.skip?
- @pytest.mark.xfail(reason="not implemented constructor from dtype")
+ @pytest.mark.skip(reason="not implemented constructor from dtype")
def test_from_dtype(self, data):
# construct from our dtype & string dtype
pass
@@ -147,13 +146,11 @@ class TestGetitem(BaseJSON, base.BaseGetitemTests):
class TestMissing(BaseJSON, base.BaseMissingTests):
- # TODO: Should this be pytest.mark.skip?
- @pytest.mark.xfail(reason="Setting a dict as a scalar")
+ @pytest.mark.skip(reason="Setting a dict as a scalar")
def test_fillna_series(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
- # TODO: Should this be pytest.mark.skip?
- @pytest.mark.xfail(reason="Setting a dict as a scalar")
+ @pytest.mark.skip(reason="Setting a dict as a scalar")
def test_fillna_frame(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
@@ -204,8 +201,7 @@ def test_combine_add(self, data_repeated):
class TestCasting(BaseJSON, base.BaseCastingTests):
- # TODO: Should this be pytest.mark.skip?
- @pytest.mark.xfail(reason="failing on np.array(self, dtype=str)")
+ @pytest.mark.skip(reason="failing on np.array(self, dtype=str)")
def test_astype_str(self):
"""This currently fails in NumPy on np.array(self, dtype=str) with
@@ -257,6 +253,11 @@ def test_add_series_with_extension_array(self, data):
with tm.assert_raises_regex(TypeError, "unsupported"):
ser + data
+ def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
+ return super(TestArithmeticOps, self)._check_divmod_op(
+ s, op, other, exc=TypeError
+ )
+
class TestComparisonOps(BaseJSON, base.BaseComparisonOpsTests):
pass
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index c588552572aed..f118279c4b915 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -140,11 +140,11 @@ def test_take_series(self):
def test_reindex_non_na_fill_value(self):
pass
- @pytest.mark.xfail(reason="Categorical.take buggy")
+ @pytest.mark.skip(reason="Categorical.take buggy")
def test_take_empty(self):
pass
- @pytest.mark.xfail(reason="test not written correctly for categorical")
+ @pytest.mark.skip(reason="test not written correctly for categorical")
def test_reindex(self):
pass
@@ -208,6 +208,11 @@ def test_add_series_with_extension_array(self, data):
with tm.assert_raises_regex(TypeError, "cannot perform"):
ser + data
+ def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
+ return super(TestArithmeticOps, self)._check_divmod_op(
+ s, op, other, exc=TypeError
+ )
+
class TestComparisonOps(base.BaseComparisonOpsTests):
| Closes https://github.com/pandas-dev/pandas/issues/22930 | https://api.github.com/repos/pandas-dev/pandas/pulls/22932 | 2018-10-01T21:52:22Z | 2018-10-08T13:53:06Z | 2018-10-08T13:53:06Z | 2018-10-08T13:56:57Z |
Solved issue #22471 Replaced TestData with fixtures | diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index baebf414969be..ef68f796a8b02 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -33,44 +33,44 @@ class TestDataFrameAnalytics(TestData):
@td.skip_if_no_scipy
def test_corr_pearson(self):
- self.frame['A'][:5] = nan
- self.frame['B'][5:10] = nan
+ float_frame['A'][:5] = nan
+ float_frame['B'][5:10] = nan
self._check_method('pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self):
- self.frame['A'][:5] = nan
- self.frame['B'][5:10] = nan
+ float_frame['A'][:5] = nan
+ float_frame['B'][5:10] = nan
self._check_method('kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self):
- self.frame['A'][:5] = nan
- self.frame['B'][5:10] = nan
+ float_frame['A'][:5] = nan
+ float_frame['B'][5:10] = nan
self._check_method('spearman')
def _check_method(self, method='pearson', check_minp=False):
if not check_minp:
- correls = self.frame.corr(method=method)
- exp = self.frame['A'].corr(self.frame['C'], method=method)
+ correls = float_frame.corr(method=method)
+ exp = float_frame['A'].corr(float_frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], exp)
else:
- result = self.frame.corr(min_periods=len(self.frame) - 8)
- expected = self.frame.corr()
+ result = float_frame.corr(min_periods=len(float_frame) - 8)
+ expected = float_frame.corr()
expected.loc['A', 'B'] = expected.loc['B', 'A'] = nan
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self):
- self.frame['A'][:5] = nan
- self.frame['B'][5:10] = nan
+ float_frame['A'][:5] = nan
+ float_frame['B'][5:10] = nan
# exclude non-numeric types
- result = self.mixed_frame.corr()
- expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
+ result = float_string_frame.corr()
+ expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@@ -140,34 +140,34 @@ def test_corr_invalid_method(self):
def test_cov(self):
# min_periods no NAs (corner case)
- expected = self.frame.cov()
- result = self.frame.cov(min_periods=len(self.frame))
+ expected = float_frame.cov()
+ result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
- result = self.frame.cov(min_periods=len(self.frame) + 1)
+ result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
- frame = self.frame.copy()
+ frame = float_frame.copy()
frame['A'][:5] = nan
frame['B'][5:10] = nan
- result = self.frame.cov(min_periods=len(self.frame) - 8)
- expected = self.frame.cov()
+ result = float_frame.cov(min_periods=len(float_frame) - 8)
+ expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
- self.frame['A'][:5] = nan
- self.frame['B'][:10] = nan
- cov = self.frame.cov()
+ float_frame['A'][:5] = nan
+ float_frame['B'][:10] = nan
+ cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
- self.frame['A'].cov(self.frame['C']))
+ float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
- result = self.mixed_frame.cov()
- expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
+ result = float_string_frame.cov()
+ expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
@@ -183,10 +183,10 @@ def test_cov(self):
tm.assert_frame_equal(result, expected)
def test_corrwith(self):
- a = self.tsframe
+ a = datetime_frame
noise = Series(randn(len(a)), index=a.index)
- b = self.tsframe.add(noise, axis=0)
+ b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
@@ -232,8 +232,8 @@ def test_corrwith_with_objects(self):
tm.assert_series_equal(result, expected)
def test_corrwith_series(self):
- result = self.tsframe.corrwith(self.tsframe['A'])
- expected = self.tsframe.apply(self.tsframe['A'].corr)
+ result = datetime_frame.corrwith(datetime_frame['A'])
+ expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
@@ -513,7 +513,7 @@ def test_sum(self):
# mixed types (with upcasting happening)
self._check_stat_op('sum', np.sum,
- frame=self.mixed_float.astype('float32'),
+ frame=mixed_float_frame.astype('float32'),
has_numeric_only=True, check_dtype=False,
check_less_precise=True)
@@ -563,21 +563,21 @@ def test_min(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
self._check_stat_op('min', np.min, check_dates=True)
- self._check_stat_op('min', np.min, frame=self.intframe)
+ self._check_stat_op('min', np.min, frame=int_frame)
def test_cummin(self):
- self.tsframe.loc[5:10, 0] = nan
- self.tsframe.loc[10:15, 1] = nan
- self.tsframe.loc[15:, 2] = nan
+ datetime_frame.loc[5:10, 0] = nan
+ datetime_frame.loc[10:15, 1] = nan
+ datetime_frame.loc[15:, 2] = nan
# axis = 0
- cummin = self.tsframe.cummin()
- expected = self.tsframe.apply(Series.cummin)
+ cummin = datetime_frame.cummin()
+ expected = datetime_frame.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
- cummin = self.tsframe.cummin(axis=1)
- expected = self.tsframe.apply(Series.cummin, axis=1)
+ cummin = datetime_frame.cummin(axis=1)
+ expected = datetime_frame.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
@@ -585,22 +585,22 @@ def test_cummin(self):
result = df.cummin() # noqa
# fix issue
- cummin_xs = self.tsframe.cummin(axis=1)
- assert np.shape(cummin_xs) == np.shape(self.tsframe)
+ cummin_xs = datetime_frame.cummin(axis=1)
+ assert np.shape(cummin_xs) == np.shape(datetime_frame)
def test_cummax(self):
- self.tsframe.loc[5:10, 0] = nan
- self.tsframe.loc[10:15, 1] = nan
- self.tsframe.loc[15:, 2] = nan
+ datetime_frame.loc[5:10, 0] = nan
+ datetime_frame.loc[10:15, 1] = nan
+ datetime_frame.loc[15:, 2] = nan
# axis = 0
- cummax = self.tsframe.cummax()
- expected = self.tsframe.apply(Series.cummax)
+ cummax = datetime_frame.cummax()
+ expected = datetime_frame.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
- cummax = self.tsframe.cummax(axis=1)
- expected = self.tsframe.apply(Series.cummax, axis=1)
+ cummax = datetime_frame.cummax(axis=1)
+ expected = datetime_frame.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
@@ -608,14 +608,14 @@ def test_cummax(self):
result = df.cummax() # noqa
# fix issue
- cummax_xs = self.tsframe.cummax(axis=1)
- assert np.shape(cummax_xs) == np.shape(self.tsframe)
+ cummax_xs = datetime_frame.cummax(axis=1)
+ assert np.shape(cummax_xs) == np.shape(datetime_frame)
def test_max(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
self._check_stat_op('max', np.max, check_dates=True)
- self._check_stat_op('max', np.max, frame=self.intframe)
+ self._check_stat_op('max', np.max, frame=int_frame)
def test_mad(self):
f = lambda x: np.abs(x - x.mean()).mean()
@@ -628,12 +628,12 @@ def test_var_std(self):
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
- result = self.tsframe.std(ddof=4)
- expected = self.tsframe.apply(lambda x: x.std(ddof=4))
+ result = datetime_frame.std(ddof=4)
+ expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
- result = self.tsframe.var(ddof=4)
- expected = self.tsframe.apply(lambda x: x.var(ddof=4))
+ result = datetime_frame.var(ddof=4)
+ expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
@@ -686,18 +686,18 @@ def test_mixed_ops(self, op):
assert len(result) == 2
def test_cumsum(self):
- self.tsframe.loc[5:10, 0] = nan
- self.tsframe.loc[10:15, 1] = nan
- self.tsframe.loc[15:, 2] = nan
+ datetime_frame.loc[5:10, 0] = nan
+ datetime_frame.loc[10:15, 1] = nan
+ datetime_frame.loc[15:, 2] = nan
# axis = 0
- cumsum = self.tsframe.cumsum()
- expected = self.tsframe.apply(Series.cumsum)
+ cumsum = datetime_frame.cumsum()
+ expected = datetime_frame.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
- cumsum = self.tsframe.cumsum(axis=1)
- expected = self.tsframe.apply(Series.cumsum, axis=1)
+ cumsum = datetime_frame.cumsum(axis=1)
+ expected = datetime_frame.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
@@ -705,35 +705,35 @@ def test_cumsum(self):
result = df.cumsum() # noqa
# fix issue
- cumsum_xs = self.tsframe.cumsum(axis=1)
- assert np.shape(cumsum_xs) == np.shape(self.tsframe)
+ cumsum_xs = datetime_frame.cumsum(axis=1)
+ assert np.shape(cumsum_xs) == np.shape(datetime_frame)
def test_cumprod(self):
- self.tsframe.loc[5:10, 0] = nan
- self.tsframe.loc[10:15, 1] = nan
- self.tsframe.loc[15:, 2] = nan
+ datetime_frame.loc[5:10, 0] = nan
+ datetime_frame.loc[10:15, 1] = nan
+ datetime_frame.loc[15:, 2] = nan
# axis = 0
- cumprod = self.tsframe.cumprod()
- expected = self.tsframe.apply(Series.cumprod)
+ cumprod = datetime_frame.cumprod()
+ expected = datetime_frame.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
- cumprod = self.tsframe.cumprod(axis=1)
- expected = self.tsframe.apply(Series.cumprod, axis=1)
+ cumprod = datetime_frame.cumprod(axis=1)
+ expected = datetime_frame.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
- cumprod_xs = self.tsframe.cumprod(axis=1)
- assert np.shape(cumprod_xs) == np.shape(self.tsframe)
+ cumprod_xs = datetime_frame.cumprod(axis=1)
+ assert np.shape(cumprod_xs) == np.shape(datetime_frame)
# ints
- df = self.tsframe.fillna(0).astype(int)
+ df = datetime_frame.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
- df = self.tsframe.fillna(0).astype(np.int32)
+ df = datetime_frame.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
@@ -741,8 +741,8 @@ def test_sem(self):
alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
- result = self.tsframe.sem(ddof=4)
- expected = self.tsframe.apply(
+ result = datetime_frame.sem(ddof=4)
+ expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
@@ -793,7 +793,7 @@ def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
check_dates=False, check_less_precise=False,
skipna_alternative=None):
if frame is None:
- frame = self.frame
+ frame = float_frame
# set some NAs
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
@@ -853,18 +853,18 @@ def wrapper(x):
# bad axis
tm.assert_raises_regex(ValueError, 'No axis named 2', f, axis=2)
# make sure works on mixed-type frame
- getattr(self.mixed_frame, name)(axis=0)
- getattr(self.mixed_frame, name)(axis=1)
+ getattr(float_string_frame, name)(axis=0)
+ getattr(float_string_frame, name)(axis=1)
if has_numeric_only:
- getattr(self.mixed_frame, name)(axis=0, numeric_only=True)
- getattr(self.mixed_frame, name)(axis=1, numeric_only=True)
- getattr(self.frame, name)(axis=0, numeric_only=False)
- getattr(self.frame, name)(axis=1, numeric_only=False)
+ getattr(float_string_frame, name)(axis=0, numeric_only=True)
+ getattr(float_string_frame, name)(axis=1, numeric_only=True)
+ getattr(float_frame, name)(axis=0, numeric_only=False)
+ getattr(float_frame, name)(axis=1, numeric_only=False)
# all NA case
if has_skipna:
- all_na = self.frame * np.NaN
+ all_na = float_frame * np.NaN
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
if name in ['sum', 'prod']:
@@ -1023,8 +1023,8 @@ def test_operators_timedelta64(self):
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
- axis0 = self.empty.sum(0)
- axis1 = self.empty.sum(1)
+ axis0 = empty_frame.sum(0)
+ axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
@@ -1091,41 +1091,41 @@ def test_sum_nanops_timedelta(self):
tm.assert_series_equal(result, expected)
def test_sum_object(self):
- values = self.frame.values.astype(int)
- frame = DataFrame(values, index=self.frame.index,
- columns=self.frame.columns)
+ values = float_frame.values.astype(int)
+ frame = DataFrame(values, index=float_frame.index,
+ columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self):
# ensure this works, bug report
- bools = np.isnan(self.frame)
+ bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self):
# unit test when have object data
- the_mean = self.mixed_frame.mean(axis=0)
- the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)
+ the_mean = float_string_frame.mean(axis=0)
+ the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
- assert len(the_mean.index) < len(self.mixed_frame.columns)
+ assert len(the_mean.index) < len(float_string_frame.columns)
# xs sum mixed type, just want to know it works...
- the_mean = self.mixed_frame.mean(axis=1)
- the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)
+ the_mean = float_string_frame.mean(axis=1)
+ the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
- self.frame['bool'] = self.frame['A'] > 0
- means = self.frame.mean(0)
- assert means['bool'] == self.frame['bool'].values.mean()
+ float_frame['bool'] = float_frame['A'] > 0
+ means = float_frame.mean(0)
+ assert means['bool'] == float_frame['bool'].values.mean()
def test_stats_mixed_type(self):
# don't blow up
- self.mixed_frame.std(1)
- self.mixed_frame.var(1)
- self.mixed_frame.mean(1)
- self.mixed_frame.skew(1)
+ float_string_frame.std(1)
+ float_string_frame.var(1)
+ float_string_frame.mean(1)
+ float_string_frame.skew(1)
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
@@ -1135,14 +1135,14 @@ def wrapper(x):
return np.nan
return np.median(x)
- self._check_stat_op('median', wrapper, frame=self.intframe,
+ self._check_stat_op('median', wrapper, frame=int_frame,
check_dtype=False, check_dates=True)
# Miscellanea
def test_count_objects(self):
- dm = DataFrame(self.mixed_frame._series)
- df = DataFrame(self.mixed_frame._series)
+ dm = DataFrame(float_string_frame._series)
+ df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
@@ -1161,12 +1161,12 @@ def test_sum_bools(self):
# Index of max / min
def test_idxmin(self):
- frame = self.frame
+ frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
- for df in [frame, self.intframe]:
+ for df in [frame, int_frame]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis,
skipna=skipna)
@@ -1175,12 +1175,12 @@ def test_idxmin(self):
pytest.raises(ValueError, frame.idxmin, axis=2)
def test_idxmax(self):
- frame = self.frame
+ frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
- for df in [frame, self.intframe]:
+ for df in [frame, int_frame]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmax, axis=axis,
skipna=skipna)
@@ -1328,7 +1328,7 @@ def test_any_all_level_axis_none_raises(self, method):
def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,
has_bool_only=False):
if frame is None:
- frame = self.frame > 0
+ frame = float_frame > 0
# set some NAs
frame = DataFrame(frame.values.astype(object), frame.index,
frame.columns)
@@ -1368,7 +1368,7 @@ def wrapper(x):
pytest.raises(ValueError, f, axis=2)
# make sure works on mixed-type frame
- mixed = self.mixed_frame
+ mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0
getattr(mixed, name)(axis=0)
getattr(mixed, name)(axis=1)
@@ -1747,33 +1747,33 @@ def test_pct_change(self):
# Clip
def test_clip(self):
- median = self.frame.median().median()
- original = self.frame.copy()
+ median = float_frame.median().median()
+ original = float_frame.copy()
- capped = self.frame.clip_upper(median)
+ capped = float_frame.clip_upper(median)
assert not (capped.values > median).any()
- floored = self.frame.clip_lower(median)
+ floored = float_frame.clip_lower(median)
assert not (floored.values < median).any()
- double = self.frame.clip(upper=median, lower=median)
+ double = float_frame.clip(upper=median, lower=median)
assert not (double.values != median).any()
- # Verify that self.frame was not changed inplace
- assert (self.frame.values == original.values).all()
+ # Verify that float_frame was not changed inplace
+ assert (float_frame.values == original.values).all()
def test_inplace_clip(self):
# GH #15388
- median = self.frame.median().median()
- frame_copy = self.frame.copy()
+ median = float_frame.median().median()
+ frame_copy = float_frame.copy()
frame_copy.clip_upper(median, inplace=True)
assert not (frame_copy.values > median).any()
- frame_copy = self.frame.copy()
+ frame_copy = float_frame.copy()
frame_copy.clip_lower(median, inplace=True)
assert not (frame_copy.values < median).any()
- frame_copy = self.frame.copy()
+ frame_copy = float_frame.copy()
frame_copy.clip(upper=median, lower=median, inplace=True)
assert not (frame_copy.values != median).any()
@@ -1841,7 +1841,7 @@ def test_clip_against_series(self, inplace):
])
def test_clip_against_list_like(self, inplace, lower, axis, res):
# GH #15390
- original = self.simple.copy(deep=True)
+ original = simple_frame.copy(deep=True)
result = original.clip(lower=lower, upper=[5, 6, 7],
axis=axis, inplace=inplace)
@@ -1872,9 +1872,9 @@ def test_clip_against_frame(self, axis):
def test_clip_with_na_args(self):
"""Should process np.nan argument as None """
# GH # 17276
- tm.assert_frame_equal(self.frame.clip(np.nan), self.frame)
- tm.assert_frame_equal(self.frame.clip(upper=np.nan, lower=np.nan),
- self.frame)
+ tm.assert_frame_equal(float_frame.clip(np.nan), float_frame)
+ tm.assert_frame_equal(float_frame.clip(upper=np.nan, lower=np.nan),
+ float_frame)
# GH #19992
df = DataFrame({'col_0': [1, 2, 3], 'col_1': [4, 5, 6],
diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py
index 0bc74c6890ee9..dbb9aff2a900e 100644
--- a/pandas/tests/frame/test_axis_select_reindex.py
+++ b/pandas/tests/frame/test_axis_select_reindex.py
@@ -207,33 +207,33 @@ def test_merge_join_different_levels(self):
tm.assert_frame_equal(result, expected)
def test_reindex(self):
- newFrame = self.frame.reindex(self.ts1.index)
+ newFrame = float_frame.reindex(datetime_series.index)
for col in newFrame.columns:
for idx, val in compat.iteritems(newFrame[col]):
- if idx in self.frame.index:
+ if idx in float_frame.index:
if np.isnan(val):
- assert np.isnan(self.frame[col][idx])
+ assert np.isnan(float_frame[col][idx])
else:
- assert val == self.frame[col][idx]
+ assert val == float_frame[col][idx]
else:
assert np.isnan(val)
for col, series in compat.iteritems(newFrame):
assert tm.equalContents(series.index, newFrame.index)
- emptyFrame = self.frame.reindex(Index([]))
+ emptyFrame = float_frame.reindex(Index([]))
assert len(emptyFrame.index) == 0
# Cython code should be unit-tested directly
- nonContigFrame = self.frame.reindex(self.ts1.index[::2])
+ nonContigFrame = float_frame.reindex(datetime_series.index[::2])
for col in nonContigFrame.columns:
for idx, val in compat.iteritems(nonContigFrame[col]):
- if idx in self.frame.index:
+ if idx in float_frame.index:
if np.isnan(val):
- assert np.isnan(self.frame[col][idx])
+ assert np.isnan(float_frame[col][idx])
else:
- assert val == self.frame[col][idx]
+ assert val == float_frame[col][idx]
else:
assert np.isnan(val)
@@ -243,28 +243,28 @@ def test_reindex(self):
# corner cases
# Same index, copies values but not index if copy=False
- newFrame = self.frame.reindex(self.frame.index, copy=False)
- assert newFrame.index is self.frame.index
+ newFrame = float_frame.reindex(float_frame.index, copy=False)
+ assert newFrame.index is float_frame.index
# length zero
- newFrame = self.frame.reindex([])
+ newFrame = float_frame.reindex([])
assert newFrame.empty
- assert len(newFrame.columns) == len(self.frame.columns)
+ assert len(newFrame.columns) == len(float_frame.columns)
# length zero with columns reindexed with non-empty index
- newFrame = self.frame.reindex([])
- newFrame = newFrame.reindex(self.frame.index)
- assert len(newFrame.index) == len(self.frame.index)
- assert len(newFrame.columns) == len(self.frame.columns)
+ newFrame = float_frame.reindex([])
+ newFrame = newFrame.reindex(float_frame.index)
+ assert len(newFrame.index) == len(float_frame.index)
+ assert len(newFrame.columns) == len(float_frame.columns)
# pass non-Index
- newFrame = self.frame.reindex(list(self.ts1.index))
- tm.assert_index_equal(newFrame.index, self.ts1.index)
+ newFrame = float_frame.reindex(list(datetime_series.index))
+ tm.assert_index_equal(newFrame.index, datetime_series.index)
# copy with no axes
- result = self.frame.reindex()
- assert_frame_equal(result, self.frame)
- assert result is not self.frame
+ result = float_frame.reindex()
+ assert_frame_equal(result, float_frame)
+ assert result is not float_frame
def test_reindex_nan(self):
df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],
@@ -308,31 +308,31 @@ def test_reindex_name_remains(self):
assert df.columns.name == 'iname'
def test_reindex_int(self):
- smaller = self.intframe.reindex(self.intframe.index[::2])
+ smaller = int_frame.reindex(int_frame.index[::2])
assert smaller['A'].dtype == np.int64
- bigger = smaller.reindex(self.intframe.index)
+ bigger = smaller.reindex(int_frame.index)
assert bigger['A'].dtype == np.float64
- smaller = self.intframe.reindex(columns=['A', 'B'])
+ smaller = int_frame.reindex(columns=['A', 'B'])
assert smaller['A'].dtype == np.int64
def test_reindex_like(self):
- other = self.frame.reindex(index=self.frame.index[:10],
+ other = float_frame.reindex(index=float_frame.index[:10],
columns=['C', 'B'])
- assert_frame_equal(other, self.frame.reindex_like(other))
+ assert_frame_equal(other, float_frame.reindex_like(other))
def test_reindex_columns(self):
- new_frame = self.frame.reindex(columns=['A', 'B', 'E'])
+ new_frame = float_frame.reindex(columns=['A', 'B', 'E'])
- tm.assert_series_equal(new_frame['B'], self.frame['B'])
+ tm.assert_series_equal(new_frame['B'], float_frame['B'])
assert np.isnan(new_frame['E']).all()
assert 'C' not in new_frame
# Length zero
- new_frame = self.frame.reindex(columns=[])
+ new_frame = float_frame.reindex(columns=[])
assert new_frame.empty
def test_reindex_columns_method(self):
@@ -546,40 +546,40 @@ def test_reindex_api_equivalence(self):
tm.assert_frame_equal(res1, res)
def test_align(self):
- af, bf = self.frame.align(self.frame)
- assert af._data is not self.frame._data
+ af, bf = float_frame.align(float_frame)
+ assert af._data is not float_frame._data
- af, bf = self.frame.align(self.frame, copy=False)
- assert af._data is self.frame._data
+ af, bf = float_frame.align(float_frame, copy=False)
+ assert af._data is float_frame._data
# axis = 0
- other = self.frame.iloc[:-5, :3]
- af, bf = self.frame.align(other, axis=0, fill_value=-1)
+ other = float_frame.iloc[:-5, :3]
+ af, bf = float_frame.align(other, axis=0, fill_value=-1)
tm.assert_index_equal(bf.columns, other.columns)
# test fill value
- join_idx = self.frame.index.join(other.index)
- diff_a = self.frame.index.difference(join_idx)
+ join_idx = float_frame.index.join(other.index)
+ diff_a = float_frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
assert (diff_a_vals == -1).all()
- af, bf = self.frame.align(other, join='right', axis=0)
+ af, bf = float_frame.align(other, join='right', axis=0)
tm.assert_index_equal(bf.columns, other.columns)
tm.assert_index_equal(bf.index, other.index)
tm.assert_index_equal(af.index, other.index)
# axis = 1
- other = self.frame.iloc[:-5, :3].copy()
- af, bf = self.frame.align(other, axis=1)
- tm.assert_index_equal(bf.columns, self.frame.columns)
+ other = float_frame.iloc[:-5, :3].copy()
+ af, bf = float_frame.align(other, axis=1)
+ tm.assert_index_equal(bf.columns, float_frame.columns)
tm.assert_index_equal(bf.index, other.index)
# test fill value
- join_idx = self.frame.index.join(other.index)
- diff_a = self.frame.index.difference(join_idx)
+ join_idx = float_frame.index.join(other.index)
+ diff_a = float_frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
@@ -588,57 +588,57 @@ def test_align(self):
assert (diff_a_vals == -1).all()
- af, bf = self.frame.align(other, join='inner', axis=1)
+ af, bf = float_frame.align(other, join='inner', axis=1)
tm.assert_index_equal(bf.columns, other.columns)
- af, bf = self.frame.align(other, join='inner', axis=1, method='pad')
+ af, bf = float_frame.align(other, join='inner', axis=1, method='pad')
tm.assert_index_equal(bf.columns, other.columns)
# test other non-float types
- af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')
+ af, bf = int_frame.align(other, join='inner', axis=1, method='pad')
tm.assert_index_equal(bf.columns, other.columns)
- af, bf = self.mixed_frame.align(self.mixed_frame,
+ af, bf = float_string_frame.align(float_string_frame,
join='inner', axis=1, method='pad')
- tm.assert_index_equal(bf.columns, self.mixed_frame.columns)
+ tm.assert_index_equal(bf.columns, float_string_frame.columns)
- af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1,
+ af, bf = float_frame.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=None)
tm.assert_index_equal(bf.index, Index([]))
- af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1,
+ af, bf = float_frame.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
# mixed floats/ints
- af, bf = self.mixed_float.align(other.iloc[:, 0], join='inner', axis=1,
+ af, bf = mixed_float_frame.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
- af, bf = self.mixed_int.align(other.iloc[:, 0], join='inner', axis=1,
+ af, bf = mixed_int_frame.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
# Try to align DataFrame to Series along bad axis
with pytest.raises(ValueError):
- self.frame.align(af.iloc[0, :3], join='inner', axis=2)
+ float_frame.align(af.iloc[0, :3], join='inner', axis=2)
# align dataframe to series with broadcast or not
- idx = self.frame.index
+ idx = float_frame.index
s = Series(range(len(idx)), index=idx)
- left, right = self.frame.align(s, axis=0)
- tm.assert_index_equal(left.index, self.frame.index)
- tm.assert_index_equal(right.index, self.frame.index)
+ left, right = float_frame.align(s, axis=0)
+ tm.assert_index_equal(left.index, float_frame.index)
+ tm.assert_index_equal(right.index, float_frame.index)
assert isinstance(right, Series)
- left, right = self.frame.align(s, broadcast_axis=1)
- tm.assert_index_equal(left.index, self.frame.index)
+ left, right = float_frame.align(s, broadcast_axis=1)
+ tm.assert_index_equal(left.index, float_frame.index)
expected = {}
- for c in self.frame.columns:
+ for c in float_frame.columns:
expected[c] = s
- expected = DataFrame(expected, index=self.frame.index,
- columns=self.frame.columns)
+ expected = DataFrame(expected, index=float_frame.index,
+ columns=float_frame.columns)
tm.assert_frame_equal(right, expected)
# see gh-9558
@@ -682,9 +682,9 @@ def test_align_fill_method(self, how, meth, ax, fax):
self._check_align_fill(how, meth, ax, fax)
def _check_align_fill(self, kind, meth, ax, fax):
- left = self.frame.iloc[0:4, :10]
- right = self.frame.iloc[2:, 6:]
- empty = self.frame.iloc[:0, :0]
+ left = float_frame.iloc[0:4, :10]
+ right = float_frame.iloc[2:, 6:]
+ empty = float_frame.iloc[:0, :0]
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
@@ -779,22 +779,22 @@ def test_align_series_combinations(self):
def test_filter(self):
# Items
- filtered = self.frame.filter(['A', 'B', 'E'])
+ filtered = float_frame.filter(['A', 'B', 'E'])
assert len(filtered.columns) == 2
assert 'E' not in filtered
- filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')
+ filtered = float_frame.filter(['A', 'B', 'E'], axis='columns')
assert len(filtered.columns) == 2
assert 'E' not in filtered
# Other axis
- idx = self.frame.index[0:4]
- filtered = self.frame.filter(idx, axis='index')
- expected = self.frame.reindex(index=idx)
+ idx = float_frame.index[0:4]
+ filtered = float_frame.filter(idx, axis='index')
+ expected = float_frame.reindex(index=idx)
tm.assert_frame_equal(filtered, expected)
# like
- fcopy = self.frame.copy()
+ fcopy = float_frame.copy()
fcopy['AA'] = 1
filtered = fcopy.filter(like='A')
@@ -821,35 +821,35 @@ def test_filter(self):
# pass in None
with tm.assert_raises_regex(TypeError, 'Must pass'):
- self.frame.filter()
+ float_frame.filter()
with tm.assert_raises_regex(TypeError, 'Must pass'):
- self.frame.filter(items=None)
+ float_frame.filter(items=None)
with tm.assert_raises_regex(TypeError, 'Must pass'):
- self.frame.filter(axis=1)
+ float_frame.filter(axis=1)
# test mutually exclusive arguments
with tm.assert_raises_regex(TypeError, 'mutually exclusive'):
- self.frame.filter(items=['one', 'three'], regex='e$', like='bbi')
+ float_frame.filter(items=['one', 'three'], regex='e$', like='bbi')
with tm.assert_raises_regex(TypeError, 'mutually exclusive'):
- self.frame.filter(items=['one', 'three'], regex='e$', axis=1)
+ float_frame.filter(items=['one', 'three'], regex='e$', axis=1)
with tm.assert_raises_regex(TypeError, 'mutually exclusive'):
- self.frame.filter(items=['one', 'three'], regex='e$')
+ float_frame.filter(items=['one', 'three'], regex='e$')
with tm.assert_raises_regex(TypeError, 'mutually exclusive'):
- self.frame.filter(items=['one', 'three'], like='bbi', axis=0)
+ float_frame.filter(items=['one', 'three'], like='bbi', axis=0)
with tm.assert_raises_regex(TypeError, 'mutually exclusive'):
- self.frame.filter(items=['one', 'three'], like='bbi')
+ float_frame.filter(items=['one', 'three'], like='bbi')
# objects
- filtered = self.mixed_frame.filter(like='foo')
+ filtered = float_string_frame.filter(like='foo')
assert 'foo' in filtered
# unicode columns, won't ascii-encode
- df = self.frame.rename(columns={'B': u('\u2202')})
+ df = float_frame.rename(columns={'B': u('\u2202')})
filtered = df.filter(like='C')
assert 'C' in filtered
def test_filter_regex_search(self):
- fcopy = self.frame.copy()
+ fcopy = float_frame.copy()
fcopy['AA'] = 1
# regex
@@ -901,26 +901,26 @@ def test_select(self):
# deprecated: gh-12410
f = lambda x: x.weekday() == 2
- index = self.tsframe.index[[f(x) for x in self.tsframe.index]]
- expected_weekdays = self.tsframe.reindex(index=index)
+ index = datetime_frame.index[[f(x) for x in datetime_frame.index]]
+ expected_weekdays = datetime_frame.reindex(index=index)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
- result = self.tsframe.select(f, axis=0)
+ result = datetime_frame.select(f, axis=0)
assert_frame_equal(result, expected_weekdays)
- result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)
- expected = self.frame.reindex(columns=['B', 'D'])
+ result = float_frame.select(lambda x: x in ('B', 'D'), axis=1)
+ expected = float_frame.reindex(columns=['B', 'D'])
assert_frame_equal(result, expected, check_names=False)
# replacement
f = lambda x: x.weekday == 2
- result = self.tsframe.loc(axis=0)[f(self.tsframe.index)]
+ result = datetime_frame.loc(axis=0)[f(datetime_frame.index)]
assert_frame_equal(result, expected_weekdays)
crit = lambda x: x in ['B', 'D']
- result = self.frame.loc(axis=1)[(self.frame.columns.map(crit))]
- expected = self.frame.reindex(columns=['B', 'D'])
+ result = float_frame.loc(axis=1)[(float_frame.columns.map(crit))]
+ expected = float_frame.reindex(columns=['B', 'D'])
assert_frame_equal(result, expected, check_names=False)
# doc example
@@ -935,7 +935,7 @@ def test_select(self):
def test_take(self):
# homogeneous
order = [3, 1, 2, 0]
- for df in [self.frame]:
+ for df in [float_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
@@ -948,7 +948,7 @@ def test_take(self):
# negative indices
order = [2, 1, -1]
- for df in [self.frame]:
+ for df in [float_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
@@ -975,7 +975,7 @@ def test_take(self):
# mixed-dtype
order = [4, 1, 2, 0, 3]
- for df in [self.mixed_frame]:
+ for df in [float_string_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
@@ -988,7 +988,7 @@ def test_take(self):
# negative indices
order = [4, 1, -2]
- for df in [self.mixed_frame]:
+ for df in [float_string_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
@@ -1001,7 +1001,7 @@ def test_take(self):
# by dtype
order = [1, 2, 0, 3]
- for df in [self.mixed_float, self.mixed_int]:
+ for df in [mixed_float_frame, mixed_int_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
@@ -1026,45 +1026,45 @@ def test_reindex_boolean(self):
assert isna(reindexed[1]).all()
def test_reindex_objects(self):
- reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])
+ reindexed = float_string_frame.reindex(columns=['foo', 'A', 'B'])
assert 'foo' in reindexed
- reindexed = self.mixed_frame.reindex(columns=['A', 'B'])
+ reindexed = float_string_frame.reindex(columns=['A', 'B'])
assert 'foo' not in reindexed
def test_reindex_corner(self):
index = Index(['a', 'b', 'c'])
- dm = self.empty.reindex(index=[1, 2, 3])
+ dm = empty_frame.reindex(index=[1, 2, 3])
reindexed = dm.reindex(columns=index)
tm.assert_index_equal(reindexed.columns, index)
# ints are weird
- smaller = self.intframe.reindex(columns=['A', 'B', 'E'])
+ smaller = int_frame.reindex(columns=['A', 'B', 'E'])
assert smaller['E'].dtype == np.float64
def test_reindex_axis(self):
cols = ['A', 'B', 'E']
with tm.assert_produces_warning(FutureWarning) as m:
- reindexed1 = self.intframe.reindex_axis(cols, axis=1)
+ reindexed1 = int_frame.reindex_axis(cols, axis=1)
assert 'reindex' in str(m[0].message)
- reindexed2 = self.intframe.reindex(columns=cols)
+ reindexed2 = int_frame.reindex(columns=cols)
assert_frame_equal(reindexed1, reindexed2)
- rows = self.intframe.index[0:5]
+ rows = int_frame.index[0:5]
with tm.assert_produces_warning(FutureWarning) as m:
- reindexed1 = self.intframe.reindex_axis(rows, axis=0)
+ reindexed1 = int_frame.reindex_axis(rows, axis=0)
assert 'reindex' in str(m[0].message)
- reindexed2 = self.intframe.reindex(index=rows)
+ reindexed2 = int_frame.reindex(index=rows)
assert_frame_equal(reindexed1, reindexed2)
- pytest.raises(ValueError, self.intframe.reindex_axis, rows, axis=2)
+ pytest.raises(ValueError, int_frame.reindex_axis, rows, axis=2)
# no-op case
- cols = self.frame.columns.copy()
+ cols = float_frame.columns.copy()
with tm.assert_produces_warning(FutureWarning) as m:
- newFrame = self.frame.reindex_axis(cols, axis=1)
+ newFrame = float_frame.reindex_axis(cols, axis=1)
assert 'reindex' in str(m[0].message)
- assert_frame_equal(newFrame, self.frame)
+ assert_frame_equal(newFrame, float_frame)
def test_reindex_with_nans(self):
df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index 3fe1c84174acb..c21c1575ab667 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -32,17 +32,17 @@
class TestDataFrameBlockInternals(TestData):
def test_cast_internals(self):
- casted = DataFrame(self.frame._data, dtype=int)
- expected = DataFrame(self.frame._series, dtype=int)
+ casted = DataFrame(float_frame._data, dtype=int)
+ expected = DataFrame(float_frame._series, dtype=int)
assert_frame_equal(casted, expected)
- casted = DataFrame(self.frame._data, dtype=np.int32)
- expected = DataFrame(self.frame._series, dtype=np.int32)
+ casted = DataFrame(float_frame._data, dtype=np.int32)
+ expected = DataFrame(float_frame._series, dtype=np.int32)
assert_frame_equal(casted, expected)
def test_consolidate(self):
- self.frame['E'] = 7.
- consolidated = self.frame._consolidate()
+ float_frame['E'] = 7.
+ consolidated = float_frame._consolidate()
assert len(consolidated._data.blocks) == 1
# Ensure copy, do I want this?
@@ -50,92 +50,92 @@ def test_consolidate(self):
assert recons is not consolidated
tm.assert_frame_equal(recons, consolidated)
- self.frame['F'] = 8.
- assert len(self.frame._data.blocks) == 3
+ float_frame['F'] = 8.
+ assert len(float_frame._data.blocks) == 3
- self.frame._consolidate(inplace=True)
- assert len(self.frame._data.blocks) == 1
+ float_frame._consolidate(inplace=True)
+ assert len(float_frame._data.blocks) == 1
def test_consolidate_deprecation(self):
- self.frame['E'] = 7
+ float_frame['E'] = 7
with tm.assert_produces_warning(FutureWarning):
- self.frame.consolidate()
+ float_frame.consolidate()
def test_consolidate_inplace(self):
- frame = self.frame.copy() # noqa
+ frame = float_frame.copy() # noqa
# triggers in-place consolidation
for letter in range(ord('A'), ord('Z')):
- self.frame[chr(letter)] = chr(letter)
+ float_frame[chr(letter)] = chr(letter)
def test_values_consolidate(self):
- self.frame['E'] = 7.
- assert not self.frame._data.is_consolidated()
- _ = self.frame.values # noqa
- assert self.frame._data.is_consolidated()
+ float_frame['E'] = 7.
+ assert not float_frame._data.is_consolidated()
+ _ = float_frame.values # noqa
+ assert float_frame._data.is_consolidated()
def test_modify_values(self):
- self.frame.values[5] = 5
- assert (self.frame.values[5] == 5).all()
+ float_frame.values[5] = 5
+ assert (float_frame.values[5] == 5).all()
# unconsolidated
- self.frame['E'] = 7.
- self.frame.values[6] = 6
- assert (self.frame.values[6] == 6).all()
+ float_frame['E'] = 7.
+ float_frame.values[6] = 6
+ assert (float_frame.values[6] == 6).all()
def test_boolean_set_uncons(self):
- self.frame['E'] = 7.
+ float_frame['E'] = 7.
- expected = self.frame.values.copy()
+ expected = float_frame.values.copy()
expected[expected > 1] = 2
- self.frame[self.frame > 1] = 2
- assert_almost_equal(expected, self.frame.values)
+ float_frame[float_frame > 1] = 2
+ assert_almost_equal(expected, float_frame.values)
def test_values_numeric_cols(self):
- self.frame['foo'] = 'bar'
+ float_frame['foo'] = 'bar'
- values = self.frame[['A', 'B', 'C', 'D']].values
+ values = float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
def test_values_lcd(self):
# mixed lcd
- values = self.mixed_float[['A', 'B', 'C', 'D']].values
+ values = mixed_float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
- values = self.mixed_float[['A', 'B', 'C']].values
+ values = mixed_float_frame[['A', 'B', 'C']].values
assert values.dtype == np.float32
- values = self.mixed_float[['C']].values
+ values = mixed_float_frame[['C']].values
assert values.dtype == np.float16
# GH 10364
# B uint64 forces float because there are other signed int types
- values = self.mixed_int[['A', 'B', 'C', 'D']].values
+ values = mixed_int_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
- values = self.mixed_int[['A', 'D']].values
+ values = mixed_int_frame[['A', 'D']].values
assert values.dtype == np.int64
# B uint64 forces float because there are other signed int types
- values = self.mixed_int[['A', 'B', 'C']].values
+ values = mixed_int_frame[['A', 'B', 'C']].values
assert values.dtype == np.float64
# as B and C are both unsigned, no forcing to float is needed
- values = self.mixed_int[['B', 'C']].values
+ values = mixed_int_frame[['B', 'C']].values
assert values.dtype == np.uint64
- values = self.mixed_int[['A', 'C']].values
+ values = mixed_int_frame[['A', 'C']].values
assert values.dtype == np.int32
- values = self.mixed_int[['C', 'D']].values
+ values = mixed_int_frame[['C', 'D']].values
assert values.dtype == np.int64
- values = self.mixed_int[['A']].values
+ values = mixed_int_frame[['A']].values
assert values.dtype == np.int32
- values = self.mixed_int[['C']].values
+ values = mixed_int_frame[['C']].values
assert values.dtype == np.uint8
def test_constructor_with_convert(self):
@@ -219,11 +219,11 @@ def test_construction_with_mixed(self):
expected = Series({'datetime64[ns]': 3})
# mixed-type frames
- self.mixed_frame['datetime'] = datetime.now()
- self.mixed_frame['timedelta'] = timedelta(days=1, seconds=1)
- assert self.mixed_frame['datetime'].dtype == 'M8[ns]'
- assert self.mixed_frame['timedelta'].dtype == 'm8[ns]'
- result = self.mixed_frame.get_dtype_counts().sort_values()
+ float_string_frame['datetime'] = datetime.now()
+ float_string_frame['timedelta'] = timedelta(days=1, seconds=1)
+ assert float_string_frame['datetime'].dtype == 'M8[ns]'
+ assert float_string_frame['timedelta'].dtype == 'm8[ns]'
+ result = float_string_frame.get_dtype_counts().sort_values()
expected = Series({'float64': 4,
'object': 1,
'datetime64[ns]': 1,
@@ -298,7 +298,7 @@ def test_equals_different_blocks(self):
def test_copy_blocks(self):
# API/ENH 9607
- df = DataFrame(self.frame, copy=True)
+ df = DataFrame(float_frame, copy=True)
column = df.columns[0]
# use the default copy=True, change a column
@@ -316,7 +316,7 @@ def test_copy_blocks(self):
def test_no_copy_blocks(self):
# API/ENH 9607
- df = DataFrame(self.frame, copy=True)
+ df = DataFrame(float_frame, copy=True)
column = df.columns[0]
# use the copy=False, change a column
@@ -333,28 +333,28 @@ def test_no_copy_blocks(self):
assert _df[column].equals(df[column])
def test_copy(self):
- cop = self.frame.copy()
+ cop = float_frame.copy()
cop['E'] = cop['A']
- assert 'E' not in self.frame
+ assert 'E' not in float_frame
# copy objects
- copy = self.mixed_frame.copy()
- assert copy._data is not self.mixed_frame._data
+ copy = float_string_frame.copy()
+ assert copy._data is not float_string_frame._data
def test_pickle(self):
- unpickled = tm.round_trip_pickle(self.mixed_frame)
- assert_frame_equal(self.mixed_frame, unpickled)
+ unpickled = tm.round_trip_pickle(float_string_frame)
+ assert_frame_equal(float_string_frame, unpickled)
# buglet
- self.mixed_frame._data.ndim
+ float_string_frame._data.ndim
# empty
- unpickled = tm.round_trip_pickle(self.empty)
+ unpickled = tm.round_trip_pickle(empty_frame)
repr(unpickled)
# tz frame
- unpickled = tm.round_trip_pickle(self.tzframe)
- assert_frame_equal(self.tzframe, unpickled)
+ unpickled = tm.round_trip_pickle(timezone_frame)
+ assert_frame_equal(timezone_frame, unpickled)
def test_consolidate_datetime64(self):
# numpy vstack bug
@@ -389,8 +389,8 @@ def test_consolidate_datetime64(self):
tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index)
def test_is_mixed_type(self):
- assert not self.frame._is_mixed_type
- assert self.mixed_frame._is_mixed_type
+ assert not float_frame._is_mixed_type
+ assert float_string_frame._is_mixed_type
def test_get_numeric_data(self):
# TODO(wesm): unused?
@@ -450,21 +450,21 @@ def test_get_numeric_data_extension_dtype(self):
def test_convert_objects(self):
- oops = self.mixed_frame.T.T
+ oops = float_string_frame.T.T
converted = oops._convert(datetime=True)
- assert_frame_equal(converted, self.mixed_frame)
+ assert_frame_equal(converted, float_string_frame)
assert converted['A'].dtype == np.float64
# force numeric conversion
- self.mixed_frame['H'] = '1.'
- self.mixed_frame['I'] = '1'
+ float_string_frame['H'] = '1.'
+ float_string_frame['I'] = '1'
# add in some items that will be nan
- length = len(self.mixed_frame)
- self.mixed_frame['J'] = '1.'
- self.mixed_frame['K'] = '1'
- self.mixed_frame.loc[0:5, ['J', 'K']] = 'garbled'
- converted = self.mixed_frame._convert(datetime=True, numeric=True)
+ length = len(float_string_frame)
+ float_string_frame['J'] = '1.'
+ float_string_frame['K'] = '1'
+ float_string_frame.loc[0:5, ['J', 'K']] = 'garbled'
+ converted = float_string_frame._convert(datetime=True, numeric=True)
assert converted['H'].dtype == 'float64'
assert converted['I'].dtype == 'int64'
assert converted['J'].dtype == 'float64'
@@ -473,14 +473,14 @@ def test_convert_objects(self):
assert len(converted['K'].dropna()) == length - 5
# via astype
- converted = self.mixed_frame.copy()
+ converted = float_string_frame.copy()
converted['H'] = converted['H'].astype('float64')
converted['I'] = converted['I'].astype('int64')
assert converted['H'].dtype == 'float64'
assert converted['I'].dtype == 'int64'
# via astype, but errors
- converted = self.mixed_frame.copy()
+ converted = float_string_frame.copy()
with tm.assert_raises_regex(ValueError, 'invalid literal'):
converted['H'].astype('int32')
diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
index 15ca65395e4fc..c92f1da158ab4 100644
--- a/pandas/tests/frame/test_combine_concat.py
+++ b/pandas/tests/frame/test_combine_concat.py
@@ -459,20 +459,20 @@ def test_combine_first_mixed(self):
def test_combine_first(self):
# disjoint
- head, tail = self.frame[:5], self.frame[5:]
+ head, tail = float_frame[:5], float_frame[5:]
combined = head.combine_first(tail)
- reordered_frame = self.frame.reindex(combined.index)
+ reordered_frame = float_frame.reindex(combined.index)
assert_frame_equal(combined, reordered_frame)
- assert tm.equalContents(combined.columns, self.frame.columns)
+ assert tm.equalContents(combined.columns, float_frame.columns)
assert_series_equal(combined['A'], reordered_frame['A'])
# same index
- fcopy = self.frame.copy()
+ fcopy = float_frame.copy()
fcopy['A'] = 1
del fcopy['C']
- fcopy2 = self.frame.copy()
+ fcopy2 = float_frame.copy()
fcopy2['B'] = 0
del fcopy2['D']
@@ -496,20 +496,20 @@ def test_combine_first(self):
assert (combined['A'][:10] == 0).all()
# no overlap
- f = self.frame[:10]
- g = self.frame[10:]
+ f = float_frame[:10]
+ g = float_frame[10:]
combined = f.combine_first(g)
assert_series_equal(combined['A'].reindex(f.index), f['A'])
assert_series_equal(combined['A'].reindex(g.index), g['A'])
# corner cases
- comb = self.frame.combine_first(self.empty)
- assert_frame_equal(comb, self.frame)
+ comb = float_frame.combine_first(empty_frame)
+ assert_frame_equal(comb, float_frame)
- comb = self.empty.combine_first(self.frame)
- assert_frame_equal(comb, self.frame)
+ comb = empty_frame.combine_first(float_frame)
+ assert_frame_equal(comb, float_frame)
- comb = self.frame.combine_first(DataFrame(index=["faz", "boo"]))
+ comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
assert "faz" in comb.index
# #2525
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 2f1c9e05a01b0..f3fbb4d1aea70 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -48,7 +48,7 @@ def test_constructor_mixed(self):
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
- assert self.mixed_frame['foo'].dtype == np.object_
+ assert float_string_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
@@ -163,12 +163,12 @@ def test_constructor_dtype_str_na_values(self, string_dtype):
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
- rec = self.frame.to_records(index=False)
+ rec = float_frame.to_records(index=False)
# Assigning causes segfault in NumPy < 1.5.1
# rec.dtype.names = list(rec.dtype.names)[::-1]
- index = self.frame.index
+ index = float_frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
@@ -228,24 +228,24 @@ def test_constructor_ordereddict(self):
assert expected == list(df.columns)
def test_constructor_dict(self):
- frame = DataFrame({'col1': self.ts1,
- 'col2': self.ts2})
+ frame = DataFrame({'col1': datetime_series,
+ 'col2': datetime_series_short})
# col2 is padded with NaN
- assert len(self.ts1) == 30
- assert len(self.ts2) == 25
+ assert len(datetime_series) == 30
+ assert len(datetime_series_short) == 25
- tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
+ tm.assert_series_equal(datetime_series, frame['col1'], check_names=False)
- exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
- index=self.ts1.index, name='col2')
+ exp = pd.Series(np.concatenate([[np.nan] * 5, datetime_series_short.values]),
+ index=datetime_series.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
- frame = DataFrame({'col1': self.ts1,
- 'col2': self.ts2},
+ frame = DataFrame({'col1': datetime_series,
+ 'col2': datetime_series_short},
columns=['col2', 'col3', 'col4'])
- assert len(frame) == len(self.ts2)
+ assert len(frame) == len(datetime_series_short)
assert 'col1' not in frame
assert isna(frame['col3']).all()
@@ -347,7 +347,7 @@ def test_constructor_dict_nan_tuple_key(self, value):
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
- d = {'b': self.ts2, 'a': self.ts1}
+ d = {'b': datetime_series_short, 'a': datetime_series}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@@ -356,7 +356,7 @@ def test_constructor_dict_order_insertion(self):
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
- d = {'b': self.ts2, 'a': self.ts1}
+ d = {'b': datetime_series_short, 'a': datetime_series}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
@@ -444,13 +444,13 @@ def test_constructor_subclass_dict(self):
# try with defaultdict
from collections import defaultdict
data = {}
- self.frame['B'][:10] = np.nan
- for k, v in compat.iteritems(self.frame):
+ float_frame['B'][:10] = np.nan
+ for k, v in compat.iteritems(float_frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
- tm.assert_frame_equal(self.frame.sort_index(), frame)
+ tm.assert_frame_equal(float_frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
@@ -838,10 +838,10 @@ def test_constructor_arrays_and_scalars(self):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
- df = DataFrame(self.frame)
- tm.assert_frame_equal(df, self.frame)
+ df = DataFrame(float_frame)
+ tm.assert_frame_equal(df, float_frame)
- df_casted = DataFrame(self.frame, dtype=np.int64)
+ df_casted = DataFrame(float_frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
@@ -870,8 +870,8 @@ def test_constructor_more(self):
with tm.assert_raises_regex(ValueError, 'cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
- dm = DataFrame(DataFrame(self.frame._series))
- tm.assert_frame_equal(dm, self.frame)
+ dm = DataFrame(DataFrame(float_frame._series))
+ tm.assert_frame_equal(dm, float_frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
@@ -1125,7 +1125,7 @@ def test_constructor_scalar(self):
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
- df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
+ df = DataFrame(float_frame['A'], index=float_frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
@@ -1167,9 +1167,9 @@ def test_constructor_namedtuples(self):
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
- data_dict = self.mixed_frame.T._series
+ data_dict = float_string_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
- expected = self.mixed_frame.sort_index()
+ expected = float_string_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
@@ -1272,37 +1272,37 @@ def test_constructor_Series_differently_indexed(self):
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
- index = list(self.frame.index[:5])
- columns = list(self.frame.columns[:3])
+ index = list(float_frame.index[:5])
+ columns = list(float_frame.columns[:3])
- result = DataFrame(self.frame._data, index=index,
+ result = DataFrame(float_frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
- items = [(c, self.frame[c]) for c in self.frame.columns]
+ items = [(c, float_frame[c]) for c in float_frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
- tm.assert_frame_equal(recons, self.frame)
+ tm.assert_frame_equal(recons, float_frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
- tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
+ tm.assert_frame_equal(recons, float_frame.loc[:, ['C', 'B', 'A']])
# orient='index'
- row_items = [(idx, self.mixed_frame.xs(idx))
- for idx in self.mixed_frame.index]
+ row_items = [(idx, float_string_frame.xs(idx))
+ for idx in float_string_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
- columns=self.mixed_frame.columns,
+ columns=float_string_frame.columns,
orient='index')
- tm.assert_frame_equal(recons, self.mixed_frame)
+ tm.assert_frame_equal(recons, float_string_frame)
assert recons['A'].dtype == np.float64
with tm.assert_raises_regex(TypeError,
@@ -1314,16 +1314,16 @@ def test_constructor_from_items(self):
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
- [('bar', 'baz')] * len(self.mixed_frame))
- self.mixed_frame['foo'] = arr
- row_items = [(idx, list(self.mixed_frame.xs(idx)))
- for idx in self.mixed_frame.index]
+ [('bar', 'baz')] * len(float_string_frame))
+ float_string_frame['foo'] = arr
+ row_items = [(idx, list(float_string_frame.xs(idx)))
+ for idx in float_string_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
- columns=self.mixed_frame.columns,
+ columns=float_string_frame.columns,
orient='index')
- tm.assert_frame_equal(recons, self.mixed_frame)
+ tm.assert_frame_equal(recons, float_string_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
@@ -1365,13 +1365,13 @@ def test_from_items_deprecation(self):
orient='index')
def test_constructor_mix_series_nonseries(self):
- df = DataFrame({'A': self.frame['A'],
- 'B': list(self.frame['B'])}, columns=['A', 'B'])
- tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
+ df = DataFrame({'A': float_frame['A'],
+ 'B': list(float_frame['B'])}, columns=['A', 'B'])
+ tm.assert_frame_equal(df, float_frame.loc[:, ['A', 'B']])
with tm.assert_raises_regex(ValueError, 'does not match '
'index length'):
- DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
+ DataFrame({'A': float_frame['A'], 'B': list(float_frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
@@ -1626,23 +1626,23 @@ def test_constructor_for_list_with_dtypes(self):
tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self):
- cop = DataFrame(self.frame, copy=True)
+ cop = DataFrame(float_frame, copy=True)
cop['A'] = 5
assert (cop['A'] == 5).all()
- assert not (self.frame['A'] == 5).all()
+ assert not (float_frame['A'] == 5).all()
def test_constructor_ndarray_copy(self):
- df = DataFrame(self.frame.values)
+ df = DataFrame(float_frame.values)
- self.frame.values[5] = 5
+ float_frame.values[5] = 5
assert (df.values[5] == 5).all()
- df = DataFrame(self.frame.values, copy=True)
- self.frame.values[6] = 6
+ df = DataFrame(float_frame.values, copy=True)
+ float_frame.values[6] = 6
assert not (df.values[6] == 6).all()
def test_constructor_series_copy(self):
- series = self.frame._series
+ series = float_frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index c91370dc36770..dd6b2e04c15f0 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -395,10 +395,10 @@ def test_select_dtypes_typecodes(self):
assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected)
def test_dtypes_gh8722(self):
- self.mixed_frame['bool'] = self.mixed_frame['A'] > 0
- result = self.mixed_frame.dtypes
+ float_string_frame['bool'] = float_string_frame['A'] > 0
+ result = float_string_frame.dtypes
expected = Series({k: v.dtype
- for k, v in compat.iteritems(self.mixed_frame)},
+ for k, v in compat.iteritems(float_string_frame)},
index=result.index)
assert_series_equal(result, expected)
@@ -409,7 +409,7 @@ def test_dtypes_gh8722(self):
assert_series_equal(result, Series({0: np.dtype('int64')}))
def test_ftypes(self):
- frame = self.mixed_float
+ frame = mixed_float_frame
expected = Series(dict(A='float32:dense',
B='float32:dense',
C='float16:dense',
@@ -418,23 +418,23 @@ def test_ftypes(self):
assert_series_equal(result, expected)
def test_astype(self):
- casted = self.frame.astype(int)
- expected = DataFrame(self.frame.values.astype(int),
- index=self.frame.index,
- columns=self.frame.columns)
+ casted = float_frame.astype(int)
+ expected = DataFrame(float_frame.values.astype(int),
+ index=float_frame.index,
+ columns=float_frame.columns)
assert_frame_equal(casted, expected)
- casted = self.frame.astype(np.int32)
- expected = DataFrame(self.frame.values.astype(np.int32),
- index=self.frame.index,
- columns=self.frame.columns)
+ casted = float_frame.astype(np.int32)
+ expected = DataFrame(float_frame.values.astype(np.int32),
+ index=float_frame.index,
+ columns=float_frame.columns)
assert_frame_equal(casted, expected)
- self.frame['foo'] = '5'
- casted = self.frame.astype(int)
- expected = DataFrame(self.frame.values.astype(int),
- index=self.frame.index,
- columns=self.frame.columns)
+ float_frame['foo'] = '5'
+ casted = float_frame.astype(int)
+ expected = DataFrame(float_frame.values.astype(int),
+ index=float_frame.index,
+ columns=float_frame.columns)
assert_frame_equal(casted, expected)
# mixed casting
@@ -442,7 +442,7 @@ def _check_cast(df, v):
assert (list({s.dtype.name for
_, s in compat.iteritems(df)})[0] == v)
- mn = self.all_mixed._get_numeric_data().copy()
+ mn = mixed_type_frame._get_numeric_data().copy()
mn['little_float'] = np.array(12345., dtype='float16')
mn['big_float'] = np.array(123456789101112., dtype='float64')
@@ -452,13 +452,13 @@ def _check_cast(df, v):
casted = mn.astype('int64')
_check_cast(casted, 'int64')
- casted = self.mixed_float.reindex(columns=['A', 'B']).astype('float32')
+ casted = mixed_float_frame.reindex(columns=['A', 'B']).astype('float32')
_check_cast(casted, 'float32')
casted = mn.reindex(columns=['little_float']).astype('float16')
_check_cast(casted, 'float16')
- casted = self.mixed_float.reindex(columns=['A', 'B']).astype('float16')
+ casted = mixed_float_frame.reindex(columns=['A', 'B']).astype('float16')
_check_cast(casted, 'float16')
casted = mn.astype('float32')
@@ -472,16 +472,16 @@ def _check_cast(df, v):
_check_cast(casted, 'object')
def test_astype_with_exclude_string(self):
- df = self.frame.copy()
- expected = self.frame.astype(int)
+ df = float_frame.copy()
+ expected = float_frame.astype(int)
df['string'] = 'foo'
casted = df.astype(int, errors='ignore')
expected['string'] = 'foo'
assert_frame_equal(casted, expected)
- df = self.frame.copy()
- expected = self.frame.astype(np.int32)
+ df = float_frame.copy()
+ expected = float_frame.astype(np.int32)
df['string'] = 'foo'
casted = df.astype(np.int32, errors='ignore')
@@ -490,18 +490,18 @@ def test_astype_with_exclude_string(self):
def test_astype_with_view(self):
- tf = self.mixed_float.reindex(columns=['A', 'B', 'C'])
+ tf = mixed_float_frame.reindex(columns=['A', 'B', 'C'])
casted = tf.astype(np.int64)
casted = tf.astype(np.float32)
# this is the only real reason to do it this way
- tf = np.round(self.frame).astype(np.int32)
+ tf = np.round(float_frame).astype(np.int32)
casted = tf.astype(np.float32, copy=False)
# TODO(wesm): verification?
- tf = self.frame.astype(np.float64)
+ tf = float_frame.astype(np.float64)
casted = tf.astype(np.int64, copy=False) # noqa
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@@ -853,7 +853,7 @@ class TestDataFrameDatetimeWithTZ(TestData):
def test_interleave(self):
# interleave with object
- result = self.tzframe.assign(D='foo').values
+ result = timezone_frame.assign(D='foo').values
expected = np.array([[Timestamp('2013-01-01 00:00:00'),
Timestamp('2013-01-02 00:00:00'),
Timestamp('2013-01-03 00:00:00')],
@@ -869,7 +869,7 @@ def test_interleave(self):
tm.assert_numpy_array_equal(result, expected)
# interleave with only datetime64[ns]
- result = self.tzframe.values
+ result = timezone_frame.values
expected = np.array([[Timestamp('2013-01-01 00:00:00'),
Timestamp('2013-01-02 00:00:00'),
Timestamp('2013-01-03 00:00:00')],
@@ -899,11 +899,11 @@ def test_astype(self):
Timestamp('2013-01-03 00:00:00+0100',
tz='CET')]],
dtype=object).T
- result = self.tzframe.astype(object)
+ result = timezone_frame.astype(object)
assert_frame_equal(result, DataFrame(
- expected, index=self.tzframe.index, columns=self.tzframe.columns))
+ expected, index=timezone_frame.index, columns=timezone_frame.columns))
- result = self.tzframe.astype('datetime64[ns]')
+ result = timezone_frame.astype('datetime64[ns]')
expected = DataFrame({'A': date_range('20130101', periods=3),
'B': (date_range('20130101', periods=3,
tz='US/Eastern')
@@ -919,17 +919,17 @@ def test_astype(self):
def test_astype_str(self):
# str formatting
- result = self.tzframe.astype(str)
+ result = timezone_frame.astype(str)
expected = DataFrame([['2013-01-01', '2013-01-01 00:00:00-05:00',
'2013-01-01 00:00:00+01:00'],
['2013-01-02', 'NaT', 'NaT'],
['2013-01-03', '2013-01-03 00:00:00-05:00',
'2013-01-03 00:00:00+01:00']],
- columns=self.tzframe.columns)
+ columns=timezone_frame.columns)
tm.assert_frame_equal(result, expected)
with option_context('display.max_columns', 20):
- result = str(self.tzframe)
+ result = str(timezone_frame)
assert ('0 2013-01-01 2013-01-01 00:00:00-05:00 '
'2013-01-01 00:00:00+01:00') in result
assert ('1 2013-01-02 '
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index 2b93af357481a..b28a2e73ea167 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -42,7 +42,7 @@ class TestDataFrameIndexing(TestData):
def test_getitem(self):
# Slicing
- sl = self.frame[:20]
+ sl = float_frame[:20]
assert len(sl.index) == 20
# Column access
@@ -50,14 +50,14 @@ def test_getitem(self):
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
- for key, _ in compat.iteritems(self.frame._series):
- assert self.frame[key] is not None
+ for key, _ in compat.iteritems(float_frame._series):
+ assert float_frame[key] is not None
- assert 'random' not in self.frame
+ assert 'random' not in float_frame
with tm.assert_raises_regex(KeyError, 'random'):
- self.frame['random']
+ float_frame['random']
- df = self.frame.copy()
+ df = float_frame.copy()
df['$10'] = randn(len(df))
ad = randn(len(df))
@@ -75,12 +75,12 @@ def test_getitem_dupe_cols(self):
df[['baf']]
def test_get(self):
- b = self.frame.get('B')
- assert_series_equal(b, self.frame['B'])
+ b = float_frame.get('B')
+ assert_series_equal(b, float_frame['B'])
- assert self.frame.get('foo') is None
- assert_series_equal(self.frame.get('foo', self.frame['B']),
- self.frame['B'])
+ assert float_frame.get('foo') is None
+ assert_series_equal(float_frame.get('foo', float_frame['B']),
+ float_frame['B'])
@pytest.mark.parametrize("df", [
DataFrame(),
@@ -93,8 +93,8 @@ def test_get_none(self, df):
def test_loc_iterable(self):
idx = iter(['A', 'B', 'C'])
- result = self.frame.loc[:, idx]
- expected = self.frame.loc[:, ['A', 'B', 'C']]
+ result = float_frame.loc[:, idx]
+ expected = float_frame.loc[:, ['A', 'B', 'C']]
assert_frame_equal(result, expected)
@pytest.mark.parametrize(
@@ -108,7 +108,7 @@ def test_getitem_listlike(self, idx_type, levels):
# GH 21294
if levels == 1:
- frame, missing = self.frame, 'food'
+ frame, missing = float_frame, 'food'
else:
# MultiIndex columns
frame = DataFrame(randn(8, 3),
@@ -134,28 +134,28 @@ def test_getitem_listlike(self, idx_type, levels):
def test_getitem_callable(self):
# GH 12533
- result = self.frame[lambda x: 'A']
- tm.assert_series_equal(result, self.frame.loc[:, 'A'])
+ result = float_frame[lambda x: 'A']
+ tm.assert_series_equal(result, float_frame.loc[:, 'A'])
- result = self.frame[lambda x: ['A', 'B']]
- tm.assert_frame_equal(result, self.frame.loc[:, ['A', 'B']])
+ result = float_frame[lambda x: ['A', 'B']]
+ tm.assert_frame_equal(result, float_frame.loc[:, ['A', 'B']])
- df = self.frame[:3]
+ df = float_frame[:3]
result = df[lambda x: [True, False, True]]
- tm.assert_frame_equal(result, self.frame.iloc[[0, 2], :])
+ tm.assert_frame_equal(result, float_frame.iloc[[0, 2], :])
def test_setitem_list(self):
- self.frame['E'] = 'foo'
- data = self.frame[['A', 'B']]
- self.frame[['B', 'A']] = data
+ float_frame['E'] = 'foo'
+ data = float_frame[['A', 'B']]
+ float_frame[['B', 'A']] = data
- assert_series_equal(self.frame['B'], data['A'], check_names=False)
- assert_series_equal(self.frame['A'], data['B'], check_names=False)
+ assert_series_equal(float_frame['B'], data['A'], check_names=False)
+ assert_series_equal(float_frame['A'], data['B'], check_names=False)
with tm.assert_raises_regex(ValueError,
'Columns must be same length as key'):
- data[['A']] = self.frame[['A', 'B']]
+ data[['A']] = float_frame[['A', 'B']]
with tm.assert_raises_regex(ValueError, 'Length of values '
'does not match '
@@ -176,16 +176,16 @@ def test_setitem_list(self):
assert_series_equal(result, expected)
def test_setitem_list_not_dataframe(self):
- data = np.random.randn(len(self.frame), 2)
- self.frame[['A', 'B']] = data
- assert_almost_equal(self.frame[['A', 'B']].values, data)
+ data = np.random.randn(len(float_frame), 2)
+ float_frame[['A', 'B']] = data
+ assert_almost_equal(float_frame[['A', 'B']].values, data)
def test_setitem_list_of_tuples(self):
- tuples = lzip(self.frame['A'], self.frame['B'])
- self.frame['tuples'] = tuples
+ tuples = lzip(float_frame['A'], float_frame['B'])
+ float_frame['tuples'] = tuples
- result = self.frame['tuples']
- expected = Series(tuples, index=self.frame.index, name='tuples')
+ result = float_frame['tuples']
+ expected = Series(tuples, index=float_frame.index, name='tuples')
assert_series_equal(result, expected)
def test_setitem_mulit_index(self):
@@ -234,27 +234,27 @@ def inc(x):
def test_getitem_boolean(self):
# boolean indexing
- d = self.tsframe.index[10]
- indexer = self.tsframe.index > d
+ d = datetime_frame.index[10]
+ indexer = datetime_frame.index > d
indexer_obj = indexer.astype(object)
- subindex = self.tsframe.index[indexer]
- subframe = self.tsframe[indexer]
+ subindex = datetime_frame.index[indexer]
+ subframe = datetime_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
with tm.assert_raises_regex(ValueError, 'Item wrong length'):
- self.tsframe[indexer[:-1]]
+ datetime_frame[indexer[:-1]]
- subframe_obj = self.tsframe[indexer_obj]
+ subframe_obj = datetime_frame[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
with tm.assert_raises_regex(ValueError, 'boolean values only'):
- self.tsframe[self.tsframe]
+ datetime_frame[datetime_frame]
# test that Series work
- indexer_obj = Series(indexer_obj, self.tsframe.index)
+ indexer_obj = Series(indexer_obj, datetime_frame.index)
- subframe_obj = self.tsframe[indexer_obj]
+ subframe_obj = datetime_frame[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
@@ -262,14 +262,14 @@ def test_getitem_boolean(self):
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
- indexer_obj = indexer_obj.reindex(self.tsframe.index[::-1])
- subframe_obj = self.tsframe[indexer_obj]
+ indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
+ subframe_obj = datetime_frame[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
- for df in [self.tsframe, self.mixed_frame,
- self.mixed_float, self.mixed_int]:
- if compat.PY3 and df is self.mixed_frame:
+ for df in [datetime_frame, float_string_frame,
+ mixed_float_frame, mixed_int_frame]:
+ if compat.PY3 and df is float_string_frame:
continue
data = df._get_numeric_data()
@@ -292,7 +292,7 @@ def test_getitem_boolean(self):
def test_getitem_boolean_casting(self):
# don't upcast if we don't need to
- df = self.tsframe.copy()
+ df = datetime_frame.copy()
df['E'] = 1
df['E'] = df['E'].astype('int32')
df['E1'] = df['E'].copy()
@@ -385,23 +385,23 @@ def test_getitem_ix_mixed_integer(self):
def test_getitem_setitem_ix_negative_integers(self):
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
- result = self.frame.ix[:, -1]
- assert_series_equal(result, self.frame['D'])
+ result = float_frame.ix[:, -1]
+ assert_series_equal(result, float_frame['D'])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
- result = self.frame.ix[:, [-1]]
- assert_frame_equal(result, self.frame[['D']])
+ result = float_frame.ix[:, [-1]]
+ assert_frame_equal(result, float_frame[['D']])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
- result = self.frame.ix[:, [-1, -2]]
- assert_frame_equal(result, self.frame[['D', 'C']])
+ result = float_frame.ix[:, [-1, -2]]
+ assert_frame_equal(result, float_frame[['D', 'C']])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
- self.frame.ix[:, [-1]] = 0
- assert (self.frame['D'] == 0).all()
+ float_frame.ix[:, [-1]] = 0
+ assert (float_frame['D'] == 0).all()
df = DataFrame(np.random.randn(8, 4))
# ix does label-based indexing when having an integer index
@@ -428,8 +428,8 @@ def test_getitem_setitem_ix_negative_integers(self):
assert a.ix[-2].name == 'S'
def test_getattr(self):
- assert_series_equal(self.frame.A, self.frame['A'])
- pytest.raises(AttributeError, getattr, self.frame,
+ assert_series_equal(float_frame.A, float_frame['A'])
+ pytest.raises(AttributeError, getattr, float_frame,
'NONEXISTENT_NAME')
def test_setattr_column(self):
@@ -440,41 +440,41 @@ def test_setattr_column(self):
def test_setitem(self):
# not sure what else to do here
- series = self.frame['A'][::2]
- self.frame['col5'] = series
- assert 'col5' in self.frame
+ series = float_frame['A'][::2]
+ float_frame['col5'] = series
+ assert 'col5' in float_frame
assert len(series) == 15
- assert len(self.frame) == 30
+ assert len(float_frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
- exp = Series(exp, index=self.frame.index, name='col5')
- tm.assert_series_equal(self.frame['col5'], exp)
+ exp = Series(exp, index=float_frame.index, name='col5')
+ tm.assert_series_equal(float_frame['col5'], exp)
- series = self.frame['A']
- self.frame['col6'] = series
- tm.assert_series_equal(series, self.frame['col6'], check_names=False)
+ series = float_frame['A']
+ float_frame['col6'] = series
+ tm.assert_series_equal(series, float_frame['col6'], check_names=False)
with pytest.raises(KeyError):
- self.frame[randn(len(self.frame) + 1)] = 1
+ float_frame[randn(len(float_frame) + 1)] = 1
# set ndarray
- arr = randn(len(self.frame))
- self.frame['col9'] = arr
- assert (self.frame['col9'] == arr).all()
+ arr = randn(len(float_frame))
+ float_frame['col9'] = arr
+ assert (float_frame['col9'] == arr).all()
- self.frame['col7'] = 5
- assert((self.frame['col7'] == 5).all())
+ float_frame['col7'] = 5
+ assert((float_frame['col7'] == 5).all())
- self.frame['col0'] = 3.14
- assert((self.frame['col0'] == 3.14).all())
+ float_frame['col0'] = 3.14
+ assert((float_frame['col0'] == 3.14).all())
- self.frame['col8'] = 'foo'
- assert((self.frame['col8'] == 'foo').all())
+ float_frame['col8'] = 'foo'
+ assert((float_frame['col8'] == 'foo').all())
# this is partially a view (e.g. some blocks are view)
# so raise/warn
- smaller = self.frame[:2]
+ smaller = float_frame[:2]
def f():
smaller['col10'] = ['1', '2']
@@ -494,26 +494,26 @@ def f():
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype):
- arr = randn(len(self.frame))
+ arr = randn(len(float_frame))
- self.frame[dtype] = np.array(arr, dtype=dtype)
- assert self.frame[dtype].dtype.name == dtype
+ float_frame[dtype] = np.array(arr, dtype=dtype)
+ assert float_frame[dtype].dtype.name == dtype
def test_setitem_tuple(self):
- self.frame['A', 'B'] = self.frame['A']
- assert_series_equal(self.frame['A', 'B'], self.frame[
+ float_frame['A', 'B'] = float_frame['A']
+ assert_series_equal(float_frame['A', 'B'], float_frame[
'A'], check_names=False)
def test_setitem_always_copy(self):
- s = self.frame['A'].copy()
- self.frame['E'] = s
+ s = float_frame['A'].copy()
+ float_frame['E'] = s
- self.frame['E'][5:10] = nan
+ float_frame['E'][5:10] = nan
assert notna(s[5:10]).all()
def test_setitem_boolean(self):
- df = self.frame.copy()
- values = self.frame.values
+ df = float_frame.copy()
+ values = float_frame.values
df[df['A'] > 0] = 4
values[values[:, 0] > 0] = 4
@@ -569,7 +569,7 @@ def test_setitem_boolean(self):
def test_setitem_boolean_mask(self, mask_type):
# Test for issue #18582
- df = self.frame.copy()
+ df = float_frame.copy()
mask = mask_type(df)
# index with boolean mask
@@ -581,33 +581,33 @@ def test_setitem_boolean_mask(self, mask_type):
assert_frame_equal(result, expected)
def test_setitem_cast(self):
- self.frame['D'] = self.frame['D'].astype('i8')
- assert self.frame['D'].dtype == np.int64
+ float_frame['D'] = float_frame['D'].astype('i8')
+ assert float_frame['D'].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
- self.frame['B'] = 0
- assert self.frame['B'].dtype == np.int64
+ float_frame['B'] = 0
+ assert float_frame['B'].dtype == np.int64
# cast if pass array of course
- self.frame['B'] = np.arange(len(self.frame))
- assert issubclass(self.frame['B'].dtype.type, np.integer)
+ float_frame['B'] = np.arange(len(float_frame))
+ assert issubclass(float_frame['B'].dtype.type, np.integer)
- self.frame['foo'] = 'bar'
- self.frame['foo'] = 0
- assert self.frame['foo'].dtype == np.int64
+ float_frame['foo'] = 'bar'
+ float_frame['foo'] = 0
+ assert float_frame['foo'].dtype == np.int64
- self.frame['foo'] = 'bar'
- self.frame['foo'] = 2.5
- assert self.frame['foo'].dtype == np.float64
+ float_frame['foo'] = 'bar'
+ float_frame['foo'] = 2.5
+ assert float_frame['foo'].dtype == np.float64
- self.frame['something'] = 0
- assert self.frame['something'].dtype == np.int64
- self.frame['something'] = 2
- assert self.frame['something'].dtype == np.int64
- self.frame['something'] = 2.5
- assert self.frame['something'].dtype == np.float64
+ float_frame['something'] = 0
+ assert float_frame['something'].dtype == np.int64
+ float_frame['something'] = 2
+ assert float_frame['something'].dtype == np.int64
+ float_frame['something'] = 2.5
+ assert float_frame['something'].dtype == np.float64
# GH 7704
# dtype conversion on setting
@@ -626,13 +626,13 @@ def test_setitem_cast(self):
assert df.dtypes.one == np.dtype(np.int8)
def test_setitem_boolean_column(self):
- expected = self.frame.copy()
- mask = self.frame['A'] > 0
+ expected = float_frame.copy()
+ mask = float_frame['A'] > 0
- self.frame.loc[mask, 'B'] = 0
+ float_frame.loc[mask, 'B'] = 0
expected.values[mask.values, 1] = 0
- assert_frame_equal(self.frame, expected)
+ assert_frame_equal(float_frame, expected)
def test_frame_setitem_timestamp(self):
# GH#2155
@@ -661,7 +661,7 @@ def test_setitem_corner(self):
df[datetime.now()] = 5.
# what to do when empty frame with index
- dm = DataFrame(index=self.frame.index)
+ dm = DataFrame(index=float_frame.index)
dm['A'] = 'foo'
dm['B'] = 'bar'
assert len(dm.columns) == 2
@@ -739,14 +739,14 @@ def test_setitem_clear_caches(self):
def test_setitem_None(self):
# GH #766
- self.frame[None] = self.frame['A']
+ float_frame[None] = float_frame['A']
assert_series_equal(
- self.frame.iloc[:, -1], self.frame['A'], check_names=False)
- assert_series_equal(self.frame.loc[:, None], self.frame[
+ float_frame.iloc[:, -1], float_frame['A'], check_names=False)
+ assert_series_equal(float_frame.loc[:, None], float_frame[
'A'], check_names=False)
- assert_series_equal(self.frame[None], self.frame[
+ assert_series_equal(float_frame[None], float_frame[
'A'], check_names=False)
- repr(self.frame)
+ repr(float_frame)
def test_setitem_empty(self):
# GH 9596
@@ -788,7 +788,7 @@ def test_getitem_empty_frame_with_boolean(self):
assert_frame_equal(df, df2)
def test_delitem_corner(self):
- f = self.frame.copy()
+ f = float_frame.copy()
del f['D']
assert len(f.columns) == 3
pytest.raises(KeyError, f.__delitem__, 'D')
@@ -796,14 +796,14 @@ def test_delitem_corner(self):
assert len(f.columns) == 2
def test_getitem_fancy_2d(self):
- f = self.frame
+ f = float_frame
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_frame_equal(f.ix[:, ['B', 'A']],
f.reindex(columns=['B', 'A']))
- subidx = self.frame.index[[5, 4, 1]]
+ subidx = float_frame.index[[5, 4, 1]]
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_frame_equal(f.ix[subidx, ['B', 'A']],
@@ -891,7 +891,7 @@ def test_getitem_setitem_integer_slice_keyerrors(self):
def test_setitem_fancy_2d(self):
# case 1
- frame = self.frame.copy()
+ frame = float_frame.copy()
expected = frame.copy()
with catch_warnings(record=True):
@@ -902,12 +902,12 @@ def test_setitem_fancy_2d(self):
assert_frame_equal(frame, expected)
# case 2
- frame = self.frame.copy()
- frame2 = self.frame.copy()
+ frame = float_frame.copy()
+ frame2 = float_frame.copy()
expected = frame.copy()
- subidx = self.frame.index[[5, 4, 1]]
+ subidx = float_frame.index[[5, 4, 1]]
values = randn(3, 2)
with catch_warnings(record=True):
@@ -922,18 +922,18 @@ def test_setitem_fancy_2d(self):
assert_frame_equal(frame2, expected)
# case 3: slicing rows, etc.
- frame = self.frame.copy()
+ frame = float_frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
- expected1 = self.frame.copy()
+ expected1 = float_frame.copy()
frame.ix[5:10] = 1.
expected1.values[5:10] = 1.
assert_frame_equal(frame, expected1)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
- expected2 = self.frame.copy()
+ expected2 = float_frame.copy()
arr = randn(5, len(frame.columns))
frame.ix[5:10] = arr
expected2.values[5:10] = arr
@@ -942,7 +942,7 @@ def test_setitem_fancy_2d(self):
# case 4
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
- frame = self.frame.copy()
+ frame = float_frame.copy()
frame.ix[5:10, :] = 1.
assert_frame_equal(frame, expected1)
frame.ix[5:10, :] = arr
@@ -951,10 +951,10 @@ def test_setitem_fancy_2d(self):
# case 5
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
- frame = self.frame.copy()
- frame2 = self.frame.copy()
+ frame = float_frame.copy()
+ frame2 = float_frame.copy()
- expected = self.frame.copy()
+ expected = float_frame.copy()
values = randn(5, 2)
frame.ix[:5, ['A', 'B']] = values
@@ -970,8 +970,8 @@ def test_setitem_fancy_2d(self):
# case 6: slice rows with labels, inclusive!
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
- frame = self.frame.copy()
- expected = self.frame.copy()
+ frame = float_frame.copy()
+ expected = float_frame.copy()
frame.ix[frame.index[5]:frame.index[10]] = 5.
expected.values[5:11] = 5
@@ -980,9 +980,9 @@ def test_setitem_fancy_2d(self):
# case 7: slice columns
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
- frame = self.frame.copy()
- frame2 = self.frame.copy()
- expected = self.frame.copy()
+ frame = float_frame.copy()
+ frame2 = float_frame.copy()
+ expected = float_frame.copy()
# slice indices
frame.ix[:, 1:3] = 4.
@@ -1004,17 +1004,17 @@ def test_setitem_fancy_2d(self):
assert_frame_equal(frame, expected)
def test_fancy_getitem_slice_mixed(self):
- sliced = self.mixed_frame.iloc[:, -3:]
+ sliced = float_string_frame.iloc[:, -3:]
assert sliced['D'].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
- sliced = self.frame.iloc[:, -3:]
+ sliced = float_frame.iloc[:, -3:]
def f():
sliced['C'] = 4.
pytest.raises(com.SettingWithCopyError, f)
- assert (self.frame['C'] == 4).all()
+ assert (float_frame['C'] == 4).all()
def test_fancy_setitem_int_labels(self):
# integer index defers to label-based indexing
@@ -1086,28 +1086,28 @@ def test_fancy_index_int_labels_exceptions(self):
([0, 1, 2], [2, 3, 4]), 5)
# try to set indices not contained in frame
- pytest.raises(KeyError, self.frame.ix.__setitem__,
+ pytest.raises(KeyError, float_frame.ix.__setitem__,
['foo', 'bar', 'baz'], 1)
- pytest.raises(KeyError, self.frame.ix.__setitem__,
+ pytest.raises(KeyError, float_frame.ix.__setitem__,
(slice(None, None), ['E']), 1)
# partial setting now allows this GH2578
- # pytest.raises(KeyError, self.frame.ix.__setitem__,
+ # pytest.raises(KeyError, float_frame.ix.__setitem__,
# (slice(None, None), 'E'), 1)
def test_setitem_fancy_mixed_2d(self):
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
- self.mixed_frame.ix[:5, ['C', 'B', 'A']] = 5
- result = self.mixed_frame.ix[:5, ['C', 'B', 'A']]
+ float_string_frame.ix[:5, ['C', 'B', 'A']] = 5
+ result = float_string_frame.ix[:5, ['C', 'B', 'A']]
assert (result.values == 5).all()
- self.mixed_frame.ix[5] = np.nan
- assert isna(self.mixed_frame.ix[5]).all()
+ float_string_frame.ix[5] = np.nan
+ assert isna(float_string_frame.ix[5]).all()
- self.mixed_frame.ix[5] = self.mixed_frame.ix[6]
- assert_series_equal(self.mixed_frame.ix[5], self.mixed_frame.ix[6],
+ float_string_frame.ix[5] = float_string_frame.ix[6]
+ assert_series_equal(float_string_frame.ix[5], float_string_frame.ix[6],
check_names=False)
# #1432
@@ -1267,7 +1267,7 @@ def test_ix_dup(self):
assert_frame_equal(sub, df.ix[2:])
def test_getitem_fancy_1d(self):
- f = self.frame
+ f = float_frame
# return self if no slicing...for now
with catch_warnings(record=True):
@@ -1322,15 +1322,15 @@ def test_getitem_fancy_1d(self):
# slice of mixed-frame
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
- xs = self.mixed_frame.ix[5]
- exp = self.mixed_frame.xs(self.mixed_frame.index[5])
+ xs = float_string_frame.ix[5]
+ exp = float_string_frame.xs(float_string_frame.index[5])
tm.assert_series_equal(xs, exp)
def test_setitem_fancy_1d(self):
# case 1: set cross-section for indices
- frame = self.frame.copy()
- expected = self.frame.copy()
+ frame = float_frame.copy()
+ expected = float_frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
@@ -1342,13 +1342,13 @@ def test_setitem_fancy_1d(self):
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
- frame2 = self.frame.copy()
+ frame2 = float_frame.copy()
frame2.ix[2, [3, 2, 1]] = [1., 2., 3.]
assert_frame_equal(frame, expected)
# case 2, set a section of a column
- frame = self.frame.copy()
- expected = self.frame.copy()
+ frame = float_frame.copy()
+ expected = float_frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
@@ -1359,13 +1359,13 @@ def test_setitem_fancy_1d(self):
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
- frame2 = self.frame.copy()
+ frame2 = float_frame.copy()
frame2.ix[5:10, 'B'] = vals
assert_frame_equal(frame, expected)
# case 3: full xs
- frame = self.frame.copy()
- expected = self.frame.copy()
+ frame = float_frame.copy()
+ expected = float_frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
@@ -1380,8 +1380,8 @@ def test_setitem_fancy_1d(self):
assert_frame_equal(frame, expected)
# single column
- frame = self.frame.copy()
- expected = self.frame.copy()
+ frame = float_frame.copy()
+ expected = float_frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
@@ -1390,7 +1390,7 @@ def test_setitem_fancy_1d(self):
assert_frame_equal(frame, expected)
def test_getitem_fancy_scalar(self):
- f = self.frame
+ f = float_frame
ix = f.loc
# individual value
@@ -1400,8 +1400,8 @@ def test_getitem_fancy_scalar(self):
assert ix[idx, col] == ts[idx]
def test_setitem_fancy_scalar(self):
- f = self.frame
- expected = self.frame.copy()
+ f = float_frame
+ expected = float_frame.copy()
ix = f.loc
# individual value
@@ -1416,7 +1416,7 @@ def test_setitem_fancy_scalar(self):
assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self):
- f = self.frame
+ f = float_frame
ix = f.loc
expected = f.reindex(columns=['B', 'D'])
@@ -1441,31 +1441,31 @@ def test_getitem_fancy_boolean(self):
def test_setitem_fancy_boolean(self):
# from 2d, set with booleans
- frame = self.frame.copy()
- expected = self.frame.copy()
+ frame = float_frame.copy()
+ expected = float_frame.copy()
mask = frame['A'] > 0
frame.loc[mask] = 0.
expected.values[mask.values] = 0.
assert_frame_equal(frame, expected)
- frame = self.frame.copy()
- expected = self.frame.copy()
+ frame = float_frame.copy()
+ expected = float_frame.copy()
frame.loc[mask, ['A', 'B']] = 0.
expected.values[mask.values, :2] = 0.
assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self):
- result = self.frame.iloc[[1, 4, 7]]
- expected = self.frame.loc[self.frame.index[[1, 4, 7]]]
+ result = float_frame.iloc[[1, 4, 7]]
+ expected = float_frame.loc[float_frame.index[[1, 4, 7]]]
assert_frame_equal(result, expected)
- result = self.frame.iloc[:, [2, 0, 1]]
- expected = self.frame.loc[:, self.frame.columns[[2, 0, 1]]]
+ result = float_frame.iloc[:, [2, 0, 1]]
+ expected = float_frame.loc[:, float_frame.columns[[2, 0, 1]]]
assert_frame_equal(result, expected)
def test_getitem_setitem_fancy_exceptions(self):
- ix = self.frame.iloc
+ ix = float_frame.iloc
with tm.assert_raises_regex(IndexingError, 'Too many indexers'):
ix[:, :, :]
@@ -1474,14 +1474,14 @@ def test_getitem_setitem_fancy_exceptions(self):
def test_getitem_setitem_boolean_misaligned(self):
# boolean index misaligned labels
- mask = self.frame['A'][::-1] > 1
+ mask = float_frame['A'][::-1] > 1
- result = self.frame.loc[mask]
- expected = self.frame.loc[mask[::-1]]
+ result = float_frame.loc[mask]
+ expected = float_frame.loc[mask[::-1]]
assert_frame_equal(result, expected)
- cp = self.frame.copy()
- expected = self.frame.copy()
+ cp = float_frame.copy()
+ expected = float_frame.copy()
cp.loc[mask] = 0
expected.loc[mask] = 0
assert_frame_equal(cp, expected)
@@ -1642,16 +1642,16 @@ def test_setitem_mixed_datetime(self):
assert_frame_equal(df, expected)
def test_setitem_frame(self):
- piece = self.frame.loc[self.frame.index[:2], ['A', 'B']]
- self.frame.loc[self.frame.index[-2]:, ['A', 'B']] = piece.values
- result = self.frame.loc[self.frame.index[-2:], ['A', 'B']].values
+ piece = float_frame.loc[float_frame.index[:2], ['A', 'B']]
+ float_frame.loc[float_frame.index[-2]:, ['A', 'B']] = piece.values
+ result = float_frame.loc[float_frame.index[-2:], ['A', 'B']].values
expected = piece.values
assert_almost_equal(result, expected)
# GH 3216
# already aligned
- f = self.mixed_frame.copy()
+ f = float_string_frame.copy()
piece = DataFrame([[1., 2.], [3., 4.]],
index=f.index[0:2], columns=['A', 'B'])
key = (slice(None, 2), ['A', 'B'])
@@ -1660,7 +1660,7 @@ def test_setitem_frame(self):
piece.values)
# rows unaligned
- f = self.mixed_frame.copy()
+ f = float_string_frame.copy()
piece = DataFrame([[1., 2.], [3., 4.], [5., 6.], [7., 8.]],
index=list(f.index[0:2]) + ['foo', 'bar'],
columns=['A', 'B'])
@@ -1670,7 +1670,7 @@ def test_setitem_frame(self):
piece.values[0:2])
# key is unaligned with values
- f = self.mixed_frame.copy()
+ f = float_string_frame.copy()
piece = f.loc[f.index[:2], ['A']]
piece.index = f.index[-2:]
key = (slice(-2, None), ['A', 'B'])
@@ -1680,8 +1680,8 @@ def test_setitem_frame(self):
piece.values)
# ndarray
- f = self.mixed_frame.copy()
- piece = self.mixed_frame.loc[f.index[:2], ['A', 'B']]
+ f = float_string_frame.copy()
+ piece = float_string_frame.loc[f.index[:2], ['A', 'B']]
key = (slice(-2, None), ['A', 'B'])
f.loc[key] = piece.values
assert_almost_equal(f.loc[f.index[-2:], ['A', 'B']].values,
@@ -1697,11 +1697,11 @@ def test_setitem_frame(self):
assert_frame_equal(df2, expected)
def test_setitem_frame_align(self):
- piece = self.frame.loc[self.frame.index[:2], ['A', 'B']]
- piece.index = self.frame.index[-2:]
+ piece = float_frame.loc[float_frame.index[:2], ['A', 'B']]
+ piece.index = float_frame.index[-2:]
piece.columns = ['A', 'B']
- self.frame.loc[self.frame.index[-2:], ['A', 'B']] = piece
- result = self.frame.loc[self.frame.index[-2:], ['A', 'B']].values
+ float_frame.loc[float_frame.index[-2:], ['A', 'B']] = piece
+ result = float_frame.loc[float_frame.index[-2:], ['A', 'B']].values
expected = piece.values
assert_almost_equal(result, expected)
@@ -1761,12 +1761,12 @@ def test_getitem_list_duplicates(self):
assert_frame_equal(result, expected)
def test_get_value(self):
- for idx in self.frame.index:
- for col in self.frame.columns:
+ for idx in float_frame.index:
+ for col in float_frame.columns:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
- result = self.frame.get_value(idx, col)
- expected = self.frame[col][idx]
+ result = float_frame.get_value(idx, col)
+ expected = float_frame[col][idx]
assert result == expected
def test_lookup(self):
@@ -1785,8 +1785,8 @@ def testit(df):
expected = alt(df, rows, cols, dtype=np.object_)
tm.assert_almost_equal(result, expected, check_dtype=False)
- testit(self.mixed_frame)
- testit(self.frame)
+ testit(float_string_frame)
+ testit(float_frame)
df = DataFrame({'label': ['a', 'b', 'a', 'c'],
'mask_a': [True, True, False, True],
@@ -1798,51 +1798,51 @@ def testit(df):
assert df['mask'].dtype == np.bool_
with pytest.raises(KeyError):
- self.frame.lookup(['xyz'], ['A'])
+ float_frame.lookup(['xyz'], ['A'])
with pytest.raises(KeyError):
- self.frame.lookup([self.frame.index[0]], ['xyz'])
+ float_frame.lookup([float_frame.index[0]], ['xyz'])
with tm.assert_raises_regex(ValueError, 'same size'):
- self.frame.lookup(['a', 'b', 'c'], ['a'])
+ float_frame.lookup(['a', 'b', 'c'], ['a'])
def test_set_value(self):
- for idx in self.frame.index:
- for col in self.frame.columns:
+ for idx in float_frame.index:
+ for col in float_frame.columns:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
- self.frame.set_value(idx, col, 1)
- assert self.frame[col][idx] == 1
+ float_frame.set_value(idx, col, 1)
+ assert float_frame[col][idx] == 1
def test_set_value_resize(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
- res = self.frame.set_value('foobar', 'B', 0)
- assert res is self.frame
+ res = float_frame.set_value('foobar', 'B', 0)
+ assert res is float_frame
assert res.index[-1] == 'foobar'
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert res.get_value('foobar', 'B') == 0
- self.frame.loc['foobar', 'qux'] = 0
+ float_frame.loc['foobar', 'qux'] = 0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
- assert self.frame.get_value('foobar', 'qux') == 0
+ assert float_frame.get_value('foobar', 'qux') == 0
- res = self.frame.copy()
+ res = float_frame.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res3 = res.set_value('foobar', 'baz', 'sam')
assert res3['baz'].dtype == np.object_
- res = self.frame.copy()
+ res = float_frame.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res3 = res.set_value('foobar', 'baz', True)
assert res3['baz'].dtype == np.object_
- res = self.frame.copy()
+ res = float_frame.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res3 = res.set_value('foobar', 'baz', 5)
@@ -1891,15 +1891,15 @@ def test_get_set_value_no_partial_indexing(self):
pytest.raises(KeyError, df.get_value, 0, 1)
def test_single_element_ix_dont_upcast(self):
- self.frame['E'] = 1
- assert issubclass(self.frame['E'].dtype.type, (int, np.integer))
+ float_frame['E'] = 1
+ assert issubclass(float_frame['E'].dtype.type, (int, np.integer))
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
- result = self.frame.ix[self.frame.index[5], 'E']
+ result = float_frame.ix[float_frame.index[5], 'E']
assert is_integer(result)
- result = self.frame.loc[self.frame.index[5], 'E']
+ result = float_frame.loc[float_frame.index[5], 'E']
assert is_integer(result)
# GH 11617
@@ -2063,10 +2063,10 @@ def test_iloc_sparse_propegate_fill_value(self):
def test_iat(self):
- for i, row in enumerate(self.frame.index):
- for j, col in enumerate(self.frame.columns):
- result = self.frame.iat[i, j]
- expected = self.frame.at[row, col]
+ for i, row in enumerate(float_frame.index):
+ for j, col in enumerate(float_frame.columns):
+ result = float_frame.iat[i, j]
+ expected = float_frame.at[row, col]
assert result == expected
def test_nested_exception(self):
@@ -2396,13 +2396,13 @@ def test_at_time_between_time_datetimeindex(self):
assert_frame_equal(result, df)
def test_xs(self):
- idx = self.frame.index[5]
- xs = self.frame.xs(idx)
+ idx = float_frame.index[5]
+ xs = float_frame.xs(idx)
for item, value in compat.iteritems(xs):
if np.isnan(value):
- assert np.isnan(self.frame[item][idx])
+ assert np.isnan(float_frame[item][idx])
else:
- assert value == self.frame[item][idx]
+ assert value == float_frame[item][idx]
# mixed-type xs
test_data = {
@@ -2416,15 +2416,15 @@ def test_xs(self):
assert xs['B'] == '1'
with pytest.raises(KeyError):
- self.tsframe.xs(self.tsframe.index[0] - BDay())
+ datetime_frame.xs(datetime_frame.index[0] - BDay())
# xs get column
- series = self.frame.xs('A', axis=1)
- expected = self.frame['A']
+ series = float_frame.xs('A', axis=1)
+ expected = float_frame['A']
assert_series_equal(series, expected)
# view is returned if possible
- series = self.frame.xs('A', axis=1)
+ series = float_frame.xs('A', axis=1)
series[:] = 5
assert (expected == 5).all()
@@ -2579,9 +2579,9 @@ def _check_get(df, cond, check_dtypes=True):
assert (rs.dtypes == df.dtypes).all()
# check getting
- for df in [default_frame, self.mixed_frame,
- self.mixed_float, self.mixed_int]:
- if compat.PY3 and df is self.mixed_frame:
+ for df in [default_frame, float_string_frame,
+ mixed_float_frame, mixed_int_frame]:
+ if compat.PY3 and df is float_string_frame:
with pytest.raises(TypeError):
df > 0
continue
@@ -2631,8 +2631,8 @@ def _check_align(df, cond, other, check_dtypes=True):
if check_dtypes and not isinstance(other, np.ndarray):
assert (rs.dtypes == df.dtypes).all()
- for df in [self.mixed_frame, self.mixed_float, self.mixed_int]:
- if compat.PY3 and df is self.mixed_frame:
+ for df in [float_string_frame, mixed_float_frame, mixed_int_frame]:
+ if compat.PY3 and df is float_string_frame:
with pytest.raises(TypeError):
df > 0
continue
@@ -2679,9 +2679,9 @@ def _check_set(df, cond, check_dtypes=True):
v = np.dtype('float64')
assert dfi[k].dtype == v
- for df in [default_frame, self.mixed_frame, self.mixed_float,
- self.mixed_int]:
- if compat.PY3 and df is self.mixed_frame:
+ for df in [default_frame, float_string_frame, mixed_float_frame,
+ mixed_int_frame]:
+ if compat.PY3 and df is float_string_frame:
with pytest.raises(TypeError):
df > 0
continue
@@ -3122,19 +3122,19 @@ def test_mask_callable(self):
(df + 2).mask((df + 2) > 8, (df + 2) + 10))
def test_head_tail(self):
- assert_frame_equal(self.frame.head(), self.frame[:5])
- assert_frame_equal(self.frame.tail(), self.frame[-5:])
+ assert_frame_equal(float_frame.head(), float_frame[:5])
+ assert_frame_equal(float_frame.tail(), float_frame[-5:])
- assert_frame_equal(self.frame.head(0), self.frame[0:0])
- assert_frame_equal(self.frame.tail(0), self.frame[0:0])
+ assert_frame_equal(float_frame.head(0), float_frame[0:0])
+ assert_frame_equal(float_frame.tail(0), float_frame[0:0])
- assert_frame_equal(self.frame.head(-1), self.frame[:-1])
- assert_frame_equal(self.frame.tail(-1), self.frame[1:])
- assert_frame_equal(self.frame.head(1), self.frame[:1])
- assert_frame_equal(self.frame.tail(1), self.frame[-1:])
+ assert_frame_equal(float_frame.head(-1), float_frame[:-1])
+ assert_frame_equal(float_frame.tail(-1), float_frame[1:])
+ assert_frame_equal(float_frame.head(1), float_frame[:1])
+ assert_frame_equal(float_frame.tail(1), float_frame[-1:])
# with a float index
- df = self.frame.copy()
- df.index = np.arange(len(self.frame)) + 0.1
+ df = float_frame.copy()
+ df.index = np.arange(len(float_frame)) + 0.1
assert_frame_equal(df.head(), df.iloc[:5])
assert_frame_equal(df.tail(), df.iloc[-5:])
assert_frame_equal(df.head(0), df[0:0])
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 136299a4b81be..f4b602b54e55f 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -42,12 +42,12 @@ def _skip_if_no_pchip():
class TestDataFrameMissingData(TestData):
def test_dropEmptyRows(self):
- N = len(self.frame.index)
+ N = len(float_frame.index)
mat = random.randn(N)
mat[:5] = nan
- frame = DataFrame({'foo': mat}, index=self.frame.index)
- original = Series(mat, index=self.frame.index, name='foo')
+ frame = DataFrame({'foo': mat}, index=float_frame.index)
+ original = Series(mat, index=float_frame.index, name='foo')
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
@@ -64,20 +64,20 @@ def test_dropEmptyRows(self):
assert_series_equal(inplace_frame2['foo'], expected)
def test_dropIncompleteRows(self):
- N = len(self.frame.index)
+ N = len(float_frame.index)
mat = random.randn(N)
mat[:5] = nan
- frame = DataFrame({'foo': mat}, index=self.frame.index)
+ frame = DataFrame({'foo': mat}, index=float_frame.index)
frame['bar'] = 5
- original = Series(mat, index=self.frame.index, name='foo')
+ original = Series(mat, index=float_frame.index, name='foo')
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna()
assert_series_equal(frame['foo'], original)
inp_frame1.dropna(inplace=True)
- exp = Series(mat[5:], index=self.frame.index[5:], name='foo')
+ exp = Series(mat[5:], index=float_frame.index[5:], name='foo')
tm.assert_series_equal(smaller_frame['foo'], exp)
tm.assert_series_equal(inp_frame1['foo'], exp)
@@ -85,8 +85,8 @@ def test_dropIncompleteRows(self):
assert_series_equal(frame['foo'], original)
assert (frame['bar'] == 5).all()
inp_frame2.dropna(subset=['bar'], inplace=True)
- tm.assert_index_equal(samesize_frame.index, self.frame.index)
- tm.assert_index_equal(inp_frame2.index, self.frame.index)
+ tm.assert_index_equal(samesize_frame.index, float_frame.index)
+ tm.assert_index_equal(inp_frame2.index, float_frame.index)
def test_dropna(self):
df = DataFrame(np.random.randn(6, 4))
@@ -163,10 +163,10 @@ def test_drop_and_dropna_caching(self):
def test_dropna_corner(self):
# bad input
- pytest.raises(ValueError, self.frame.dropna, how='foo')
- pytest.raises(TypeError, self.frame.dropna, how=None)
+ pytest.raises(ValueError, float_frame.dropna, how='foo')
+ pytest.raises(TypeError, float_frame.dropna, how=None)
# non-existent column - 8303
- pytest.raises(KeyError, self.frame.dropna, subset=['A', 'X'])
+ pytest.raises(KeyError, float_frame.dropna, subset=['A', 'X'])
def test_dropna_multiple_axes(self):
df = DataFrame([[1, np.nan, 2, 3],
@@ -212,30 +212,30 @@ def test_dropna_tz_aware_datetime(self):
assert_frame_equal(result, expected)
def test_fillna(self):
- tf = self.tsframe
+ tf = datetime_frame
tf.loc[tf.index[:5], 'A'] = nan
tf.loc[tf.index[-5:], 'A'] = nan
- zero_filled = self.tsframe.fillna(0)
+ zero_filled = datetime_frame.fillna(0)
assert (zero_filled.loc[zero_filled.index[:5], 'A'] == 0).all()
- padded = self.tsframe.fillna(method='pad')
+ padded = datetime_frame.fillna(method='pad')
assert np.isnan(padded.loc[padded.index[:5], 'A']).all()
assert (padded.loc[padded.index[-5:], 'A'] ==
padded.loc[padded.index[-5], 'A']).all()
# mixed type
- mf = self.mixed_frame
+ mf = float_string_frame
mf.loc[mf.index[5:20], 'foo'] = nan
mf.loc[mf.index[-10:], 'A'] = nan
- result = self.mixed_frame.fillna(value=0)
- result = self.mixed_frame.fillna(method='pad')
+ result = float_string_frame.fillna(value=0)
+ result = float_string_frame.fillna(method='pad')
- pytest.raises(ValueError, self.tsframe.fillna)
- pytest.raises(ValueError, self.tsframe.fillna, 5, method='ffill')
+ pytest.raises(ValueError, datetime_frame.fillna)
+ pytest.raises(ValueError, datetime_frame.fillna, 5, method='ffill')
# mixed numeric (but no float16)
- mf = self.mixed_float.reindex(columns=['A', 'B', 'D'])
+ mf = mixed_float_frame.reindex(columns=['A', 'B', 'D'])
mf.loc[mf.index[-10:], 'A'] = nan
result = mf.fillna(value=0)
_check_mixed_float(result, dtype=dict(C=None))
@@ -457,18 +457,18 @@ def test_fillna_datetime_columns(self):
tm.assert_frame_equal(result, expected)
def test_ffill(self):
- self.tsframe['A'][:5] = nan
- self.tsframe['A'][-5:] = nan
+ datetime_frame['A'][:5] = nan
+ datetime_frame['A'][-5:] = nan
- assert_frame_equal(self.tsframe.ffill(),
- self.tsframe.fillna(method='ffill'))
+ assert_frame_equal(datetime_frame.ffill(),
+ datetime_frame.fillna(method='ffill'))
def test_bfill(self):
- self.tsframe['A'][:5] = nan
- self.tsframe['A'][-5:] = nan
+ datetime_frame['A'][:5] = nan
+ datetime_frame['A'][-5:] = nan
- assert_frame_equal(self.tsframe.bfill(),
- self.tsframe.fillna(method='bfill'))
+ assert_frame_equal(datetime_frame.bfill(),
+ datetime_frame.fillna(method='bfill'))
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
@@ -597,15 +597,15 @@ def test_fillna_columns(self):
def test_fillna_invalid_method(self):
with tm.assert_raises_regex(ValueError, 'ffil'):
- self.frame.fillna(method='ffil')
+ float_frame.fillna(method='ffil')
def test_fillna_invalid_value(self):
# list
- pytest.raises(TypeError, self.frame.fillna, [1, 2])
+ pytest.raises(TypeError, float_frame.fillna, [1, 2])
# tuple
- pytest.raises(TypeError, self.frame.fillna, (1, 2))
+ pytest.raises(TypeError, float_frame.fillna, (1, 2))
# frame with series
- pytest.raises(TypeError, self.frame.iloc[:, 0].fillna, self.frame)
+ pytest.raises(TypeError, float_frame.iloc[:, 0].fillna, float_frame)
def test_fillna_col_reordering(self):
cols = ["COL." + str(i) for i in range(5, 0, -1)]
@@ -615,15 +615,15 @@ def test_fillna_col_reordering(self):
assert df.columns.tolist() == filled.columns.tolist()
def test_fill_corner(self):
- mf = self.mixed_frame
+ mf = float_string_frame
mf.loc[mf.index[5:20], 'foo'] = nan
mf.loc[mf.index[-10:], 'A'] = nan
- filled = self.mixed_frame.fillna(value=0)
+ filled = float_string_frame.fillna(value=0)
assert (filled.loc[filled.index[5:20], 'foo'] == 0).all()
- del self.mixed_frame['foo']
+ del float_string_frame['foo']
- empty_float = self.frame.reindex(columns=[])
+ empty_float = float_frame.reindex(columns=[])
# TODO(wesm): unused?
result = empty_float.fillna(value=0) # noqa
diff --git a/pandas/tests/frame/test_mutate_columns.py b/pandas/tests/frame/test_mutate_columns.py
index 51ffe2966b4e5..8b64d0621b673 100644
--- a/pandas/tests/frame/test_mutate_columns.py
+++ b/pandas/tests/frame/test_mutate_columns.py
@@ -195,8 +195,8 @@ def test_insert(self):
assert_frame_equal(df, exp)
def test_delitem(self):
- del self.frame['A']
- assert 'A' not in self.frame
+ del float_frame['A']
+ assert 'A' not in float_frame
def test_delitem_multiindex(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
@@ -225,15 +225,15 @@ def test_delitem_multiindex(self):
del df['A']
def test_pop(self):
- self.frame.columns.name = 'baz'
+ float_frame.columns.name = 'baz'
- self.frame.pop('A')
- assert 'A' not in self.frame
+ float_frame.pop('A')
+ assert 'A' not in float_frame
- self.frame['foo'] = 'bar'
- self.frame.pop('foo')
- assert 'foo' not in self.frame
- # TODO assert self.frame.columns.name == 'baz'
+ float_frame['foo'] = 'bar'
+ float_frame.pop('foo')
+ assert 'foo' not in float_frame
+ # TODO assert float_frame.columns.name == 'baz'
# gh-10912: inplace ops cause caching issue
a = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index 97c94e1134cc8..a3e57c5497fc6 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -213,7 +213,7 @@ def _check_unary_op(op):
@pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")
def test_logical_typeerror_with_non_valid(self, op, res):
# we are comparing floats vs a string
- result = getattr(self.frame, op)('foo')
+ result = getattr(float_frame, op)('foo')
assert bool(result.all().all()) is res
def test_logical_with_nas(self):
@@ -267,7 +267,7 @@ def test_neg_raises(self, df):
(- df['a'])
def test_invert(self):
- assert_frame_equal(-(self.frame < 0), ~(self.frame < 0))
+ assert_frame_equal(-(float_frame < 0), ~(float_frame < 0))
@pytest.mark.parametrize('df', [
pd.DataFrame({'a': [-1, 1]}),
@@ -528,7 +528,7 @@ def test_dti_tz_convert_to_utc(self):
assert_frame_equal(df1 + df2, exp)
def test_arith_non_pandas_object(self):
- df = self.simple
+ df = simple_frame
val1 = df.xs('a').values
added = DataFrame(df.values + val1, index=df.index, columns=df.columns)
@@ -561,15 +561,15 @@ def test_arith_alignment_non_pandas_object(self, values):
assert_frame_equal(result, expected)
def test_combineFrame(self):
- frame_copy = self.frame.reindex(self.frame.index[::2])
+ frame_copy = float_frame.reindex(float_frame.index[::2])
del frame_copy['D']
frame_copy['C'][:5] = nan
- added = self.frame + frame_copy
+ added = float_frame + frame_copy
indexer = added['A'].dropna().index
- exp = (self.frame['A'] * 2).copy()
+ exp = (float_frame['A'] * 2).copy()
tm.assert_series_equal(added['A'].dropna(), exp.loc[indexer])
@@ -582,95 +582,95 @@ def test_combineFrame(self):
assert np.isnan(added['D']).all()
- self_added = self.frame + self.frame
- tm.assert_index_equal(self_added.index, self.frame.index)
+ self_added = float_frame + float_frame
+ tm.assert_index_equal(self_added.index, float_frame.index)
- added_rev = frame_copy + self.frame
+ added_rev = frame_copy + float_frame
assert np.isnan(added['D']).all()
assert np.isnan(added_rev['D']).all()
# corner cases
# empty
- plus_empty = self.frame + self.empty
+ plus_empty = float_frame + empty_frame
assert np.isnan(plus_empty.values).all()
- empty_plus = self.empty + self.frame
+ empty_plus = empty_frame + float_frame
assert np.isnan(empty_plus.values).all()
- empty_empty = self.empty + self.empty
+ empty_empty = empty_frame + empty_frame
assert empty_empty.empty
# out of order
- reverse = self.frame.reindex(columns=self.frame.columns[::-1])
+ reverse = float_frame.reindex(columns=float_frame.columns[::-1])
- assert_frame_equal(reverse + self.frame, self.frame * 2)
+ assert_frame_equal(reverse + float_frame, float_frame * 2)
# mix vs float64, upcast
- added = self.frame + self.mixed_float
+ added = float_frame + mixed_float_frame
_check_mixed_float(added, dtype='float64')
- added = self.mixed_float + self.frame
+ added = mixed_float_frame + float_frame
_check_mixed_float(added, dtype='float64')
# mix vs mix
- added = self.mixed_float + self.mixed_float2
+ added = mixed_float_frame + mixed_float_frame2
_check_mixed_float(added, dtype=dict(C=None))
- added = self.mixed_float2 + self.mixed_float
+ added = mixed_float_frame2 + mixed_float_frame
_check_mixed_float(added, dtype=dict(C=None))
# with int
- added = self.frame + self.mixed_int
+ added = float_frame + mixed_int_frame
_check_mixed_float(added, dtype='float64')
def test_combineSeries(self):
# Series
- series = self.frame.xs(self.frame.index[0])
+ series = float_frame.xs(float_frame.index[0])
- added = self.frame + series
+ added = float_frame + series
for key, s in compat.iteritems(added):
- assert_series_equal(s, self.frame[key] + series[key])
+ assert_series_equal(s, float_frame[key] + series[key])
larger_series = series.to_dict()
larger_series['E'] = 1
larger_series = Series(larger_series)
- larger_added = self.frame + larger_series
+ larger_added = float_frame + larger_series
- for key, s in compat.iteritems(self.frame):
+ for key, s in compat.iteritems(float_frame):
assert_series_equal(larger_added[key], s + series[key])
assert 'E' in larger_added
assert np.isnan(larger_added['E']).all()
# no upcast needed
- added = self.mixed_float + series
+ added = mixed_float_frame + series
_check_mixed_float(added)
# vs mix (upcast) as needed
- added = self.mixed_float + series.astype('float32')
+ added = mixed_float_frame + series.astype('float32')
_check_mixed_float(added, dtype=dict(C=None))
- added = self.mixed_float + series.astype('float16')
+ added = mixed_float_frame + series.astype('float16')
_check_mixed_float(added, dtype=dict(C=None))
# these raise with numexpr.....as we are adding an int64 to an
# uint64....weird vs int
- # added = self.mixed_int + (100*series).astype('int64')
+ # added = mixed_int_frame + (100*series).astype('int64')
# _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
# 'int64', D = 'int64'))
- # added = self.mixed_int + (100*series).astype('int32')
+ # added = mixed_int_frame + (100*series).astype('int32')
# _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
# 'int32', D = 'int64'))
# TimeSeries
- ts = self.tsframe['A']
+ ts = datetime_frame['A']
# 10890
# we no longer allow auto timeseries broadcasting
# and require explicit broadcasting
- added = self.tsframe.add(ts, axis='index')
+ added = datetime_frame.add(ts, axis='index')
- for key, col in compat.iteritems(self.tsframe):
+ for key, col in compat.iteritems(datetime_frame):
result = col + ts
assert_series_equal(added[key], result, check_names=False)
assert added[key].name == key
@@ -679,52 +679,52 @@ def test_combineSeries(self):
else:
assert result.name is None
- smaller_frame = self.tsframe[:-5]
+ smaller_frame = datetime_frame[:-5]
smaller_added = smaller_frame.add(ts, axis='index')
- tm.assert_index_equal(smaller_added.index, self.tsframe.index)
+ tm.assert_index_equal(smaller_added.index, datetime_frame.index)
smaller_ts = ts[:-5]
- smaller_added2 = self.tsframe.add(smaller_ts, axis='index')
+ smaller_added2 = datetime_frame.add(smaller_ts, axis='index')
assert_frame_equal(smaller_added, smaller_added2)
# length 0, result is all-nan
- result = self.tsframe.add(ts[:0], axis='index')
- expected = DataFrame(np.nan, index=self.tsframe.index,
- columns=self.tsframe.columns)
+ result = datetime_frame.add(ts[:0], axis='index')
+ expected = DataFrame(np.nan, index=datetime_frame.index,
+ columns=datetime_frame.columns)
assert_frame_equal(result, expected)
# Frame is all-nan
- result = self.tsframe[:0].add(ts, axis='index')
- expected = DataFrame(np.nan, index=self.tsframe.index,
- columns=self.tsframe.columns)
+ result = datetime_frame[:0].add(ts, axis='index')
+ expected = DataFrame(np.nan, index=datetime_frame.index,
+ columns=datetime_frame.columns)
assert_frame_equal(result, expected)
# empty but with non-empty index
- frame = self.tsframe[:1].reindex(columns=[])
+ frame = datetime_frame[:1].reindex(columns=[])
result = frame.mul(ts, axis='index')
assert len(result) == len(ts)
def test_combineFunc(self):
- result = self.frame * 2
- tm.assert_numpy_array_equal(result.values, self.frame.values * 2)
+ result = float_frame * 2
+ tm.assert_numpy_array_equal(result.values, float_frame.values * 2)
# vs mix
- result = self.mixed_float * 2
+ result = mixed_float_frame * 2
for c, s in compat.iteritems(result):
tm.assert_numpy_array_equal(
- s.values, self.mixed_float[c].values * 2)
+ s.values, mixed_float_frame[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
- result = self.empty * 2
- assert result.index is self.empty.index
+ result = empty_frame * 2
+ assert result.index is empty_frame.index
assert len(result.columns) == 0
def test_comparisons(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
- row = self.simple.xs('a')
+ row = simple_frame.xs('a')
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
@@ -735,18 +735,18 @@ def test_comp(func):
'Wrong number of dimensions'):
func(df1, ndim_5)
- result2 = func(self.simple, row)
+ result2 = func(simple_frame, row)
tm.assert_numpy_array_equal(result2.values,
- func(self.simple.values, row.values))
+ func(simple_frame.values, row.values))
- result3 = func(self.frame, 0)
+ result3 = func(float_frame, 0)
tm.assert_numpy_array_equal(result3.values,
- func(self.frame.values, 0))
+ func(float_frame.values, 0))
with tm.assert_raises_regex(ValueError,
'Can only compare identically'
'-labeled DataFrame'):
- func(self.simple, self.simple[:2])
+ func(simple_frame, simple_frame[:2])
test_comp(operator.eq)
test_comp(operator.ne)
@@ -836,8 +836,8 @@ def test_boolean_comparison(self):
assert_frame_equal(result, expected)
def test_combine_generic(self):
- df1 = self.frame
- df2 = self.frame.loc[self.frame.index[:-5], ['A', 'B', 'C']]
+ df1 = float_frame
+ df2 = float_frame.loc[float_frame.index[:-5], ['A', 'B', 'C']]
combined = df1.combine(df2, np.add)
combined2 = df2.combine(df1, np.add)
@@ -847,7 +847,7 @@ def test_combine_generic(self):
chunk = combined.loc[combined.index[:-5], ['A', 'B', 'C']]
chunk2 = combined2.loc[combined2.index[:-5], ['A', 'B', 'C']]
- exp = self.frame.loc[self.frame.index[:-5],
+ exp = float_frame.loc[float_frame.index[:-5],
['A', 'B', 'C']].reindex_like(chunk) * 2
assert_frame_equal(chunk, exp)
assert_frame_equal(chunk2, exp)
diff --git a/pandas/tests/frame/test_quantile.py b/pandas/tests/frame/test_quantile.py
index 2f264874378bc..5583c889e83e4 100644
--- a/pandas/tests/frame/test_quantile.py
+++ b/pandas/tests/frame/test_quantile.py
@@ -21,14 +21,14 @@ class TestDataFrameQuantile(TestData):
def test_quantile(self):
from numpy import percentile
- q = self.tsframe.quantile(0.1, axis=0)
- assert q['A'] == percentile(self.tsframe['A'], 10)
- tm.assert_index_equal(q.index, self.tsframe.columns)
+ q = datetime_frame.quantile(0.1, axis=0)
+ assert q['A'] == percentile(datetime_frame['A'], 10)
+ tm.assert_index_equal(q.index, datetime_frame.columns)
- q = self.tsframe.quantile(0.9, axis=1)
+ q = datetime_frame.quantile(0.9, axis=1)
assert (q['2000-01-17'] ==
- percentile(self.tsframe.loc['2000-01-17'], 90))
- tm.assert_index_equal(q.index, self.tsframe.index)
+ percentile(datetime_frame.loc['2000-01-17'], 90))
+ tm.assert_index_equal(q.index, datetime_frame.index)
# test degenerate case
q = DataFrame({'x': [], 'y': []}).quantile(0.1, axis=0)
@@ -105,14 +105,14 @@ def test_quantile_interpolation(self):
from numpy import percentile
# interpolation = linear (default case)
- q = self.tsframe.quantile(0.1, axis=0, interpolation='linear')
- assert q['A'] == percentile(self.tsframe['A'], 10)
- q = self.intframe.quantile(0.1)
- assert q['A'] == percentile(self.intframe['A'], 10)
+ q = datetime_frame.quantile(0.1, axis=0, interpolation='linear')
+ assert q['A'] == percentile(datetime_frame['A'], 10)
+ q = int_frame.quantile(0.1)
+ assert q['A'] == percentile(int_frame['A'], 10)
# test with and without interpolation keyword
- q1 = self.intframe.quantile(0.1)
- assert q1['A'] == np.percentile(self.intframe['A'], 10)
+ q1 = int_frame.quantile(0.1)
+ assert q1['A'] == np.percentile(int_frame['A'], 10)
tm.assert_series_equal(q, q1)
# interpolation method other than default linear
@@ -225,7 +225,7 @@ def test_quantile_invalid(self):
msg = 'percentiles should all be in the interval \\[0, 1\\]'
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with tm.assert_raises_regex(ValueError, msg):
- self.tsframe.quantile(invalid)
+ datetime_frame.quantile(invalid)
def test_quantile_box(self):
df = DataFrame({'A': [pd.Timestamp('2011-01-01'),
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index 3c6f0f0b2ab94..454ebb2f79661 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -1019,19 +1019,19 @@ def test_query_string_scalar_variable(self, parser, engine):
class TestDataFrameEvalWithFrame(object):
def setup_method(self, method):
- self.frame = DataFrame(randn(10, 3), columns=list('abc'))
+ float_frame = DataFrame(randn(10, 3), columns=list('abc'))
def teardown_method(self, method):
- del self.frame
+ del float_frame
def test_simple_expr(self, parser, engine):
- res = self.frame.eval('a + b', engine=engine, parser=parser)
- expect = self.frame.a + self.frame.b
+ res = float_frame.eval('a + b', engine=engine, parser=parser)
+ expect = float_frame.a + float_frame.b
assert_series_equal(res, expect)
def test_bool_arith_expr(self, parser, engine):
- res = self.frame.eval('a[a < 1] + b', engine=engine, parser=parser)
- expect = self.frame.a[self.frame.a < 1] + self.frame.b
+ res = float_frame.eval('a[a < 1] + b', engine=engine, parser=parser)
+ expect = float_frame.a[float_frame.a < 1] + float_frame.b
assert_series_equal(res, expect)
@pytest.mark.parametrize('op', ['+', '-', '*', '/'])
diff --git a/pandas/tests/frame/test_rank.py b/pandas/tests/frame/test_rank.py
index 3134686c2a2d9..a01f1ef6734ac 100644
--- a/pandas/tests/frame/test_rank.py
+++ b/pandas/tests/frame/test_rank.py
@@ -35,16 +35,16 @@ def method(self, request):
def test_rank(self):
rankdata = pytest.importorskip('scipy.stats.rankdata')
- self.frame['A'][::2] = np.nan
- self.frame['B'][::3] = np.nan
- self.frame['C'][::4] = np.nan
- self.frame['D'][::5] = np.nan
+ float_frame['A'][::2] = np.nan
+ float_frame['B'][::3] = np.nan
+ float_frame['C'][::4] = np.nan
+ float_frame['D'][::5] = np.nan
- ranks0 = self.frame.rank()
- ranks1 = self.frame.rank(1)
- mask = np.isnan(self.frame.values)
+ ranks0 = float_frame.rank()
+ ranks1 = float_frame.rank(1)
+ mask = np.isnan(float_frame.values)
- fvals = self.frame.fillna(np.inf).values
+ fvals = float_frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp0[mask] = np.nan
@@ -113,11 +113,11 @@ def test_rank2(self):
tm.assert_frame_equal(result, expected)
# mixed-type frames
- self.mixed_frame['datetime'] = datetime.now()
- self.mixed_frame['timedelta'] = timedelta(days=1, seconds=1)
+ float_string_frame['datetime'] = datetime.now()
+ float_string_frame['timedelta'] = timedelta(days=1, seconds=1)
- result = self.mixed_frame.rank(1)
- expected = self.mixed_frame.rank(1, numeric_only=True)
+ result = float_string_frame.rank(1)
+ expected = float_string_frame.rank(1, numeric_only=True)
tm.assert_frame_equal(result, expected)
df = DataFrame({"a": [1e-20, -5, 1e-20 + 1e-40, 10,
@@ -128,16 +128,16 @@ def test_rank2(self):
def test_rank_na_option(self):
rankdata = pytest.importorskip('scipy.stats.rankdata')
- self.frame['A'][::2] = np.nan
- self.frame['B'][::3] = np.nan
- self.frame['C'][::4] = np.nan
- self.frame['D'][::5] = np.nan
+ float_frame['A'][::2] = np.nan
+ float_frame['B'][::3] = np.nan
+ float_frame['C'][::4] = np.nan
+ float_frame['D'][::5] = np.nan
# bottom
- ranks0 = self.frame.rank(na_option='bottom')
- ranks1 = self.frame.rank(1, na_option='bottom')
+ ranks0 = float_frame.rank(na_option='bottom')
+ ranks1 = float_frame.rank(1, na_option='bottom')
- fvals = self.frame.fillna(np.inf).values
+ fvals = float_frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp1 = np.apply_along_axis(rankdata, 1, fvals)
@@ -146,11 +146,11 @@ def test_rank_na_option(self):
tm.assert_almost_equal(ranks1.values, exp1)
# top
- ranks0 = self.frame.rank(na_option='top')
- ranks1 = self.frame.rank(1, na_option='top')
+ ranks0 = float_frame.rank(na_option='top')
+ ranks1 = float_frame.rank(1, na_option='top')
- fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
- fval1 = self.frame.T
+ fval0 = float_frame.fillna((float_frame.min() - 1).to_dict()).values
+ fval1 = float_frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
@@ -163,10 +163,10 @@ def test_rank_na_option(self):
# descending
# bottom
- ranks0 = self.frame.rank(na_option='top', ascending=False)
- ranks1 = self.frame.rank(1, na_option='top', ascending=False)
+ ranks0 = float_frame.rank(na_option='top', ascending=False)
+ ranks1 = float_frame.rank(1, na_option='top', ascending=False)
- fvals = self.frame.fillna(np.inf).values
+ fvals = float_frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fvals)
exp1 = np.apply_along_axis(rankdata, 1, -fvals)
@@ -177,11 +177,11 @@ def test_rank_na_option(self):
# descending
# top
- ranks0 = self.frame.rank(na_option='bottom', ascending=False)
- ranks1 = self.frame.rank(1, na_option='bottom', ascending=False)
+ ranks0 = float_frame.rank(na_option='bottom', ascending=False)
+ ranks1 = float_frame.rank(1, na_option='bottom', ascending=False)
- fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
- fval1 = self.frame.T
+ fval0 = float_frame.fillna((float_frame.min() - 1).to_dict()).values
+ fval1 = float_frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
@@ -195,11 +195,11 @@ def test_rank_na_option(self):
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
with tm.assert_raises_regex(ValueError, msg):
- self.frame.rank(na_option='bad', ascending=False)
+ float_frame.rank(na_option='bad', ascending=False)
# invalid type
with tm.assert_raises_regex(ValueError, msg):
- self.frame.rank(na_option=True, ascending=False)
+ float_frame.rank(na_option=True, ascending=False)
def test_rank_axis(self):
# check if using axes' names gives the same result
diff --git a/pandas/tests/frame/test_replace.py b/pandas/tests/frame/test_replace.py
index 49dba1c769572..200e11624d27b 100644
--- a/pandas/tests/frame/test_replace.py
+++ b/pandas/tests/frame/test_replace.py
@@ -26,25 +26,25 @@
class TestDataFrameReplace(TestData):
def test_replace_inplace(self):
- self.tsframe['A'][:5] = nan
- self.tsframe['A'][-5:] = nan
+ datetime_frame['A'][:5] = nan
+ datetime_frame['A'][-5:] = nan
- tsframe = self.tsframe.copy()
+ tsframe = datetime_frame.copy()
tsframe.replace(nan, 0, inplace=True)
- assert_frame_equal(tsframe, self.tsframe.fillna(0))
+ assert_frame_equal(tsframe, datetime_frame.fillna(0))
# mixed type
- mf = self.mixed_frame
+ mf = float_string_frame
mf.iloc[5:20, mf.columns.get_loc('foo')] = nan
mf.iloc[-10:, mf.columns.get_loc('A')] = nan
- result = self.mixed_frame.replace(np.nan, 0)
- expected = self.mixed_frame.fillna(value=0)
+ result = float_string_frame.replace(np.nan, 0)
+ expected = float_string_frame.fillna(value=0)
assert_frame_equal(result, expected)
- tsframe = self.tsframe.copy()
+ tsframe = datetime_frame.copy()
tsframe.replace([nan], [0], inplace=True)
- assert_frame_equal(tsframe, self.tsframe.fillna(0))
+ assert_frame_equal(tsframe, datetime_frame.fillna(0))
def test_regex_replace_scalar(self):
obj = {'a': list('ab..'), 'b': list('efgh')}
@@ -555,16 +555,16 @@ def test_replace_regex_metachar(self, metachar):
assert_frame_equal(result, expected)
def test_replace(self):
- self.tsframe['A'][:5] = nan
- self.tsframe['A'][-5:] = nan
+ datetime_frame['A'][:5] = nan
+ datetime_frame['A'][-5:] = nan
- zero_filled = self.tsframe.replace(nan, -1e8)
- assert_frame_equal(zero_filled, self.tsframe.fillna(-1e8))
- assert_frame_equal(zero_filled.replace(-1e8, nan), self.tsframe)
+ zero_filled = datetime_frame.replace(nan, -1e8)
+ assert_frame_equal(zero_filled, datetime_frame.fillna(-1e8))
+ assert_frame_equal(zero_filled.replace(-1e8, nan), datetime_frame)
- self.tsframe['A'][:5] = nan
- self.tsframe['A'][-5:] = nan
- self.tsframe['B'][:5] = -1e8
+ datetime_frame['A'][:5] = nan
+ datetime_frame['A'][-5:] = nan
+ datetime_frame['B'][:5] = -1e8
# empty
df = DataFrame(index=['a', 'b'])
@@ -649,19 +649,19 @@ def test_replace_convert(self):
assert_series_equal(expec, res)
def test_replace_mixed(self):
- mf = self.mixed_frame
+ mf = float_string_frame
mf.iloc[5:20, mf.columns.get_loc('foo')] = nan
mf.iloc[-10:, mf.columns.get_loc('A')] = nan
- result = self.mixed_frame.replace(np.nan, -18)
- expected = self.mixed_frame.fillna(value=-18)
+ result = float_string_frame.replace(np.nan, -18)
+ expected = float_string_frame.fillna(value=-18)
assert_frame_equal(result, expected)
- assert_frame_equal(result.replace(-18, nan), self.mixed_frame)
+ assert_frame_equal(result.replace(-18, nan), float_string_frame)
- result = self.mixed_frame.replace(np.nan, -1e8)
- expected = self.mixed_frame.fillna(value=-1e8)
+ result = float_string_frame.replace(np.nan, -1e8)
+ expected = float_string_frame.fillna(value=-1e8)
assert_frame_equal(result, expected)
- assert_frame_equal(result.replace(-1e8, nan), self.mixed_frame)
+ assert_frame_equal(result.replace(-1e8, nan), float_string_frame)
# int block upcasting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
@@ -729,29 +729,29 @@ def test_replace_simple_nested_dict_with_nonexistent_value(self):
assert_frame_equal(expected, result)
def test_replace_value_is_none(self):
- orig_value = self.tsframe.iloc[0, 0]
- orig2 = self.tsframe.iloc[1, 0]
+ orig_value = datetime_frame.iloc[0, 0]
+ orig2 = datetime_frame.iloc[1, 0]
- self.tsframe.iloc[0, 0] = nan
- self.tsframe.iloc[1, 0] = 1
+ datetime_frame.iloc[0, 0] = nan
+ datetime_frame.iloc[1, 0] = 1
- result = self.tsframe.replace(to_replace={nan: 0})
- expected = self.tsframe.T.replace(to_replace={nan: 0}).T
+ result = datetime_frame.replace(to_replace={nan: 0})
+ expected = datetime_frame.T.replace(to_replace={nan: 0}).T
assert_frame_equal(result, expected)
- result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8})
- tsframe = self.tsframe.copy()
+ result = datetime_frame.replace(to_replace={nan: 0, 1: -1e8})
+ tsframe = datetime_frame.copy()
tsframe.iloc[0, 0] = 0
tsframe.iloc[1, 0] = -1e8
expected = tsframe
assert_frame_equal(expected, result)
- self.tsframe.iloc[0, 0] = orig_value
- self.tsframe.iloc[1, 0] = orig2
+ datetime_frame.iloc[0, 0] = orig_value
+ datetime_frame.iloc[1, 0] = orig2
def test_replace_for_new_dtypes(self):
# dtypes
- tsframe = self.tsframe.copy().astype(np.float32)
+ tsframe = datetime_frame.copy().astype(np.float32)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index 668613c494a47..ae8173f972821 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -30,7 +30,7 @@ class TestDataFrameReprInfoEtc(TestData):
def test_repr_empty(self):
# empty
- foo = repr(self.empty) # noqa
+ foo = repr(empty_frame) # noqa
# empty with index
frame = DataFrame(index=np.arange(1000))
@@ -40,8 +40,8 @@ def test_repr_mixed(self):
buf = StringIO()
# mixed
- foo = repr(self.mixed_frame) # noqa
- self.mixed_frame.info(verbose=False, buf=buf)
+ foo = repr(float_string_frame) # noqa
+ float_string_frame.info(verbose=False, buf=buf)
@pytest.mark.slow
def test_repr_mixed_big(self):
@@ -58,12 +58,12 @@ def test_repr(self):
buf = StringIO()
# small one
- foo = repr(self.frame)
- self.frame.info(verbose=False, buf=buf)
+ foo = repr(float_frame)
+ float_frame.info(verbose=False, buf=buf)
# even smaller
- self.frame.reindex(columns=['A']).info(verbose=False, buf=buf)
- self.frame.reindex(columns=['A', 'B']).info(verbose=False, buf=buf)
+ float_frame.reindex(columns=['A']).info(verbose=False, buf=buf)
+ float_frame.reindex(columns=['A', 'B']).info(verbose=False, buf=buf)
# exhausting cases in DataFrame.info
@@ -72,7 +72,7 @@ def test_repr(self):
foo = repr(no_index) # noqa
# no columns or index
- self.empty.info(buf=buf)
+ empty_frame.info(buf=buf)
df = DataFrame(["a\n\r\tb"], columns=["a\n\r\td"], index=["a\n\r\tf"])
assert "\t" not in repr(df)
@@ -113,13 +113,13 @@ def test_repr_unsortable(self):
repr(unsortable)
fmt.set_option('display.precision', 3, 'display.column_space', 10)
- repr(self.frame)
+ repr(float_frame)
fmt.set_option('display.max_rows', 10, 'display.max_columns', 2)
- repr(self.frame)
+ repr(float_frame)
fmt.set_option('display.max_rows', 1000, 'display.max_columns', 1000)
- repr(self.frame)
+ repr(float_frame)
tm.reset_display_options()
@@ -197,8 +197,8 @@ def test_latex_repr(self):
@tm.capture_stdout
def test_info(self):
io = StringIO()
- self.frame.info(buf=io)
- self.tsframe.info(buf=io)
+ float_frame.info(buf=io)
+ datetime_frame.info(buf=io)
frame = DataFrame(np.random.randn(5, 3))
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py
index 9f6735c7ba2bf..83ef229e621f3 100644
--- a/pandas/tests/frame/test_reshape.py
+++ b/pandas/tests/frame/test_reshape.py
@@ -117,7 +117,7 @@ def test_pivot_index_none(self):
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
- df = self.frame.copy()
+ df = float_frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
@@ -485,11 +485,11 @@ def test_unstack_level_binding(self):
def test_unstack_to_series(self):
# check reversibility
- data = self.frame.unstack()
+ data = float_frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
- assert_frame_equal(undo, self.frame)
+ assert_frame_equal(undo, float_frame)
# check NA handling
data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})
diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py
index 599ae683f914b..96f4a4aafff80 100644
--- a/pandas/tests/frame/test_sorting.py
+++ b/pandas/tests/frame/test_sorting.py
@@ -276,7 +276,7 @@ def test_sort_datetimes(self):
assert_frame_equal(df1, df2)
def test_frame_column_inplace_sort_exception(self):
- s = self.frame['A']
+ s = float_frame['A']
with tm.assert_raises_regex(ValueError, "This Series is a view"):
s.sort_values(inplace=True)
diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py
index b1d9d362d1402..2d1cc90a1dfb9 100644
--- a/pandas/tests/frame/test_timeseries.py
+++ b/pandas/tests/frame/test_timeseries.py
@@ -30,10 +30,10 @@
class TestDataFrameTimeSeriesMethods(TestData):
def test_diff(self):
- the_diff = self.tsframe.diff(1)
+ the_diff = datetime_frame.diff(1)
assert_series_equal(the_diff['A'],
- self.tsframe['A'] - self.tsframe['A'].shift(1))
+ datetime_frame['A'] - datetime_frame['A'].shift(1))
# int dtype
a = 10000000000000000
@@ -44,7 +44,7 @@ def test_diff(self):
assert rs.s[1] == 1
# mixed numeric
- tf = self.tsframe.astype('float32')
+ tf = datetime_frame.astype('float32')
the_diff = tf.diff(1)
assert_series_equal(the_diff['A'],
tf['A'] - tf['A'].shift(1))
@@ -103,13 +103,13 @@ def test_diff_mixed_dtype(self):
assert result[0].dtype == np.float64
def test_diff_neg_n(self):
- rs = self.tsframe.diff(-1)
- xp = self.tsframe - self.tsframe.shift(-1)
+ rs = datetime_frame.diff(-1)
+ xp = datetime_frame - datetime_frame.shift(-1)
assert_frame_equal(rs, xp)
def test_diff_float_n(self):
- rs = self.tsframe.diff(1.)
- xp = self.tsframe.diff(1)
+ rs = datetime_frame.diff(1.)
+ xp = datetime_frame.diff(1)
assert_frame_equal(rs, xp)
def test_diff_axis(self):
@@ -121,19 +121,19 @@ def test_diff_axis(self):
[[np.nan, np.nan], [2., 2.]]))
def test_pct_change(self):
- rs = self.tsframe.pct_change(fill_method=None)
- assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)
+ rs = datetime_frame.pct_change(fill_method=None)
+ assert_frame_equal(rs, datetime_frame / datetime_frame.shift(1) - 1)
- rs = self.tsframe.pct_change(2)
- filled = self.tsframe.fillna(method='pad')
+ rs = datetime_frame.pct_change(2)
+ filled = datetime_frame.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(2) - 1)
- rs = self.tsframe.pct_change(fill_method='bfill', limit=1)
- filled = self.tsframe.fillna(method='bfill', limit=1)
+ rs = datetime_frame.pct_change(fill_method='bfill', limit=1)
+ filled = datetime_frame.fillna(method='bfill', limit=1)
assert_frame_equal(rs, filled / filled.shift(1) - 1)
- rs = self.tsframe.pct_change(freq='5D')
- filled = self.tsframe.fillna(method='pad')
+ rs = datetime_frame.pct_change(freq='5D')
+ filled = datetime_frame.fillna(method='pad')
assert_frame_equal(rs,
(filled / filled.shift(freq='5D') - 1)
.reindex_like(filled))
@@ -157,16 +157,16 @@ def test_pct_change_shift_over_nas(self):
('14B', 14, None, None)])
def test_pct_change_periods_freq(self, freq, periods, fill_method, limit):
# GH 7292
- rs_freq = self.tsframe.pct_change(freq=freq,
+ rs_freq = datetime_frame.pct_change(freq=freq,
fill_method=fill_method,
limit=limit)
- rs_periods = self.tsframe.pct_change(periods,
+ rs_periods = datetime_frame.pct_change(periods,
fill_method=fill_method,
limit=limit)
assert_frame_equal(rs_freq, rs_periods)
- empty_ts = DataFrame(index=self.tsframe.index,
- columns=self.tsframe.columns)
+ empty_ts = DataFrame(index=datetime_frame.index,
+ columns=datetime_frame.columns)
rs_freq = empty_ts.pct_change(freq=freq,
fill_method=fill_method,
limit=limit)
@@ -231,36 +231,36 @@ def test_frame_add_datetime64_col_other_units(self):
def test_shift(self):
# naive shift
- shiftedFrame = self.tsframe.shift(5)
- tm.assert_index_equal(shiftedFrame.index, self.tsframe.index)
+ shiftedFrame = datetime_frame.shift(5)
+ tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
- shiftedSeries = self.tsframe['A'].shift(5)
+ shiftedSeries = datetime_frame['A'].shift(5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
- shiftedFrame = self.tsframe.shift(-5)
- tm.assert_index_equal(shiftedFrame.index, self.tsframe.index)
+ shiftedFrame = datetime_frame.shift(-5)
+ tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
- shiftedSeries = self.tsframe['A'].shift(-5)
+ shiftedSeries = datetime_frame['A'].shift(-5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
# shift by 0
- unshifted = self.tsframe.shift(0)
- assert_frame_equal(unshifted, self.tsframe)
+ unshifted = datetime_frame.shift(0)
+ assert_frame_equal(unshifted, datetime_frame)
# shift by DateOffset
- shiftedFrame = self.tsframe.shift(5, freq=offsets.BDay())
- assert len(shiftedFrame) == len(self.tsframe)
+ shiftedFrame = datetime_frame.shift(5, freq=offsets.BDay())
+ assert len(shiftedFrame) == len(datetime_frame)
- shiftedFrame2 = self.tsframe.shift(5, freq='B')
+ shiftedFrame2 = datetime_frame.shift(5, freq='B')
assert_frame_equal(shiftedFrame, shiftedFrame2)
- d = self.tsframe.index[0]
+ d = datetime_frame.index[0]
shifted_d = d + offsets.BDay(5)
- assert_series_equal(self.tsframe.xs(d),
+ assert_series_equal(datetime_frame.xs(d),
shiftedFrame.xs(shifted_d), check_names=False)
# shift int frame
- int_shifted = self.intframe.shift(1) # noqa
+ int_shifted = int_frame.shift(1) # noqa
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
@@ -364,32 +364,32 @@ def test_tshift(self):
ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
- shifted = self.tsframe.tshift(1)
+ shifted = datetime_frame.tshift(1)
unshifted = shifted.tshift(-1)
- assert_frame_equal(self.tsframe, unshifted)
+ assert_frame_equal(datetime_frame, unshifted)
- shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq)
+ shifted2 = datetime_frame.tshift(freq=datetime_frame.index.freq)
assert_frame_equal(shifted, shifted2)
- inferred_ts = DataFrame(self.tsframe.values,
- Index(np.asarray(self.tsframe.index)),
- columns=self.tsframe.columns)
+ inferred_ts = DataFrame(datetime_frame.values,
+ Index(np.asarray(datetime_frame.index)),
+ columns=datetime_frame.columns)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
- assert_frame_equal(shifted, self.tsframe.tshift(1))
+ assert_frame_equal(shifted, datetime_frame.tshift(1))
assert_frame_equal(unshifted, inferred_ts)
- no_freq = self.tsframe.iloc[[0, 5, 7], :]
+ no_freq = datetime_frame.iloc[[0, 5, 7], :]
pytest.raises(ValueError, no_freq.tshift)
def test_truncate(self):
- ts = self.tsframe[::3]
+ ts = datetime_frame[::3]
- start, end = self.tsframe.index[3], self.tsframe.index[6]
+ start, end = datetime_frame.index[3], datetime_frame.index[6]
- start_missing = self.tsframe.index[2]
- end_missing = self.tsframe.index[7]
+ start_missing = datetime_frame.index[2]
+ end_missing = datetime_frame.index[7]
# neither specified
truncated = ts.truncate()
@@ -427,10 +427,10 @@ def test_truncate(self):
after=ts.index[0] + 1)
def test_truncate_copy(self):
- index = self.tsframe.index
- truncated = self.tsframe.truncate(index[5], index[10])
+ index = datetime_frame.index
+ truncated = datetime_frame.truncate(index[5], index[10])
truncated.values[:] = 5.
- assert not (self.tsframe.values[5:11] == 5).any()
+ assert not (datetime_frame.values[5:11] == 5).any()
def test_truncate_nonsortedindex(self):
# GH 17935
@@ -460,8 +460,8 @@ def test_truncate_nonsortedindex(self):
df.truncate(before=2, after=20, axis=1)
def test_asfreq(self):
- offset_monthly = self.tsframe.asfreq(offsets.BMonthEnd())
- rule_monthly = self.tsframe.asfreq('BM')
+ offset_monthly = datetime_frame.asfreq(offsets.BMonthEnd())
+ rule_monthly = datetime_frame.asfreq('BM')
tm.assert_almost_equal(offset_monthly['A'], rule_monthly['A'])
@@ -472,7 +472,7 @@ def test_asfreq(self):
filled_dep = rule_monthly.asfreq('B', method='pad') # noqa
# test does not blow up on length-0 DataFrame
- zero_length = self.tsframe.reindex([])
+ zero_length = datetime_frame.reindex([])
result = zero_length.asfreq('BM')
assert result is not zero_length
@@ -515,12 +515,12 @@ def test_asfreq_fillvalue(self):
({'A': [1, np.nan, 3]}, [1, 2, 2], 1, 2)])
def test_first_last_valid(self, data, idx,
expected_first, expected_last):
- N = len(self.frame.index)
+ N = len(float_frame.index)
mat = randn(N)
mat[:5] = nan
mat[-5:] = nan
- frame = DataFrame({'foo': mat}, index=self.frame.index)
+ frame = DataFrame({'foo': mat}, index=float_frame.index)
index = frame.first_valid_index()
assert index == frame.index[5]
diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py
index e1c3c29ef2846..01cca17e72960 100644
--- a/pandas/tests/frame/test_to_csv.py
+++ b/pandas/tests/frame/test_to_csv.py
@@ -42,36 +42,36 @@ def read_csv(self, path, **kwargs):
def test_from_csv_deprecation(self):
# see gh-17812
with ensure_clean('__tmp_from_csv_deprecation__') as path:
- self.tsframe.to_csv(path)
+ datetime_frame.to_csv(path)
with tm.assert_produces_warning(FutureWarning):
depr_recons = DataFrame.from_csv(path)
- assert_frame_equal(self.tsframe, depr_recons)
+ assert_frame_equal(datetime_frame, depr_recons)
def test_to_csv_from_csv1(self):
with ensure_clean('__tmp_to_csv_from_csv1__') as path:
- self.frame['A'][:5] = nan
+ float_frame['A'][:5] = nan
- self.frame.to_csv(path)
- self.frame.to_csv(path, columns=['A', 'B'])
- self.frame.to_csv(path, header=False)
- self.frame.to_csv(path, index=False)
+ float_frame.to_csv(path)
+ float_frame.to_csv(path, columns=['A', 'B'])
+ float_frame.to_csv(path, header=False)
+ float_frame.to_csv(path, index=False)
# test roundtrip
- self.tsframe.to_csv(path)
+ datetime_frame.to_csv(path)
recons = self.read_csv(path)
- assert_frame_equal(self.tsframe, recons)
+ assert_frame_equal(datetime_frame, recons)
- self.tsframe.to_csv(path, index_label='index')
+ datetime_frame.to_csv(path, index_label='index')
recons = self.read_csv(path, index_col=None)
- assert(len(recons.columns) == len(self.tsframe.columns) + 1)
+ assert(len(recons.columns) == len(datetime_frame.columns) + 1)
# no index
- self.tsframe.to_csv(path, index=False)
+ datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
- assert_almost_equal(self.tsframe.values, recons.values)
+ assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame({'s1': Series(lrange(3), lrange(3)),
@@ -104,14 +104,14 @@ def test_to_csv_from_csv2(self):
# column aliases
col_aliases = Index(['AA', 'X', 'Y', 'Z'])
- self.frame2.to_csv(path, header=col_aliases)
+ float_frame2.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
- xp = self.frame2.copy()
+ xp = float_frame2.copy()
xp.columns = col_aliases
assert_frame_equal(xp, rs)
- pytest.raises(ValueError, self.frame2.to_csv, path,
+ pytest.raises(ValueError, float_frame2.to_csv, path,
header=['AA', 'X'])
def test_to_csv_from_csv3(self):
@@ -151,14 +151,14 @@ def test_to_csv_from_csv5(self):
# tz, 8260
with ensure_clean('__tmp_to_csv_from_csv5__') as path:
- self.tzframe.to_csv(path)
+ timezone_frame.to_csv(path)
result = pd.read_csv(path, index_col=0, parse_dates=['A'])
converter = lambda c: to_datetime(result[c]).dt.tz_convert(
- 'UTC').dt.tz_convert(self.tzframe[c].dt.tz)
+ 'UTC').dt.tz_convert(timezone_frame[c].dt.tz)
result['B'] = converter('B')
result['C'] = converter('C')
- assert_frame_equal(result, self.tzframe)
+ assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
@@ -411,32 +411,32 @@ def _to_uni(x):
def test_to_csv_from_csv_w_some_infs(self):
# test roundtrip with inf, -inf, nan, as full columns and mix
- self.frame['G'] = np.nan
+ float_frame['G'] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < .5]
- self.frame['H'] = self.frame.index.map(f)
+ float_frame['H'] = float_frame.index.map(f)
with ensure_clean() as path:
- self.frame.to_csv(path)
+ float_frame.to_csv(path)
recons = self.read_csv(path)
# TODO to_csv drops column name
- assert_frame_equal(self.frame, recons, check_names=False)
- assert_frame_equal(np.isinf(self.frame),
+ assert_frame_equal(float_frame, recons, check_names=False)
+ assert_frame_equal(np.isinf(float_frame),
np.isinf(recons), check_names=False)
def test_to_csv_from_csv_w_all_infs(self):
# test roundtrip with inf, -inf, nan, as full columns and mix
- self.frame['E'] = np.inf
- self.frame['F'] = -np.inf
+ float_frame['E'] = np.inf
+ float_frame['F'] = -np.inf
with ensure_clean() as path:
- self.frame.to_csv(path)
+ float_frame.to_csv(path)
recons = self.read_csv(path)
# TODO to_csv drops column name
- assert_frame_equal(self.frame, recons, check_names=False)
- assert_frame_equal(np.isinf(self.frame),
+ assert_frame_equal(float_frame, recons, check_names=False)
+ assert_frame_equal(np.isinf(float_frame),
np.isinf(recons), check_names=False)
def test_to_csv_no_index(self):
@@ -479,7 +479,7 @@ def test_to_csv_headers(self):
def test_to_csv_multiindex(self):
- frame = self.frame
+ frame = float_frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=['first', 'second'])
@@ -501,10 +501,10 @@ def test_to_csv_multiindex(self):
assert frame.index.names == df.index.names
# needed if setUp becomes a class method
- self.frame.index = old_index
+ float_frame.index = old_index
# try multiindex with dates
- tsframe = self.tsframe
+ tsframe = datetime_frame
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
@@ -523,10 +523,10 @@ def test_to_csv_multiindex(self):
# no index
tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
- assert_almost_equal(recons.values, self.tsframe.values)
+ assert_almost_equal(recons.values, datetime_frame.values)
# needed if setUp becomes class method
- self.tsframe.index = old_index
+ datetime_frame.index = old_index
with ensure_clean('__tmp_to_csv_multiindex__') as path:
# GH3571, GH1651, GH3141
@@ -811,11 +811,11 @@ def test_to_csv_unicode_index_col(self):
def test_to_csv_stringio(self):
buf = StringIO()
- self.frame.to_csv(buf)
+ float_frame.to_csv(buf)
buf.seek(0)
recons = read_csv(buf, index_col=0)
# TODO to_csv drops column name
- assert_frame_equal(recons, self.frame, check_names=False)
+ assert_frame_equal(recons, float_frame, check_names=False)
def test_to_csv_float_format(self):
@@ -920,10 +920,10 @@ def test_to_csv_path_is_none(self):
# GH 8215
# Make sure we return string for consistency with
# Series.to_csv()
- csv_str = self.frame.to_csv(path_or_buf=None)
+ csv_str = float_frame.to_csv(path_or_buf=None)
assert isinstance(csv_str, str)
recons = pd.read_csv(StringIO(csv_str), index_col=0)
- assert_frame_equal(self.frame, recons)
+ assert_frame_equal(float_frame, recons)
@pytest.mark.parametrize('df,encoding', [
(DataFrame([[0.123456, 0.234567, 0.567567],
@@ -968,7 +968,7 @@ def test_to_csv_compression(self, df, encoding, compression):
def test_to_csv_date_format(self):
with ensure_clean('__tmp_to_csv_date_format__') as path:
- dt_index = self.tsframe.index
+ dt_index = datetime_frame.index
datetime_frame = DataFrame(
{'A': dt_index, 'B': dt_index.shift(1)}, index=dt_index)
datetime_frame.to_csv(path, date_format='%Y%m%d')
| - [ ] close #22471
- [ ] tests updated
- [ ] removed TestData from frame-tests, replace with fixtures
| https://api.github.com/repos/pandas-dev/pandas/pulls/22928 | 2018-10-01T16:02:04Z | 2018-10-07T23:12:46Z | null | 2018-10-07T23:12:46Z |
Support passing function to Appender | diff --git a/pandas/tests/util/test_util.py b/pandas/tests/util/test_util.py
index 6552655110557..e99ecc489b2b8 100644
--- a/pandas/tests/util/test_util.py
+++ b/pandas/tests/util/test_util.py
@@ -10,7 +10,8 @@
from pandas.compat import intern, PY3
import pandas.core.common as com
from pandas.util._move import move_into_mutable_buffer, BadMove, stolenbuf
-from pandas.util._decorators import deprecate_kwarg, make_signature
+from pandas.util._decorators import (deprecate_kwarg, make_signature,
+ Appender, Substitution)
from pandas.util._validators import (validate_args, validate_kwargs,
validate_args_and_kwargs,
validate_bool_kwarg)
@@ -531,3 +532,42 @@ def test_safe_import(monkeypatch):
monkeypatch.setitem(sys.modules, mod_name, mod)
assert not td.safe_import(mod_name, min_version="2.0")
assert td.safe_import(mod_name, min_version="1.0")
+
+
+class TestAppender(object):
+ def test_pass_callable(self):
+ # GH#22927
+
+ def func():
+ """foo"""
+ return
+
+ @Appender(func)
+ def wrapped():
+ return
+
+ assert wrapped.__doc__ == "foo"
+
+ def test_append_class(self):
+ # GH#22927
+
+ @Appender("bar")
+ class cls(object):
+ pass
+
+ assert cls.__doc__ == "bar"
+ assert cls.__name__ == "cls"
+ assert cls.__module__ == "pandas.tests.util.test_util"
+
+
+class TestSubstitution(object):
+ def test_substitute_class(self):
+ # GH#22927
+
+ @Substitution(name="Bond, James Bond")
+ class cls(object):
+ """%(name)s"""
+
+ assert cls.__doc__ == "Bond, James Bond"
+ assert cls.__name__ == "cls"
+ assert cls.__module__ == "pandas.tests.util.test_util"
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index 82cd44113cb25..70016f5923acb 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -244,8 +244,8 @@ def __init__(self, *args, **kwargs):
self.params = args or kwargs
def __call__(self, func):
- func.__doc__ = func.__doc__ and func.__doc__ % self.params
- return func
+ new_doc = func.__doc__ and func.__doc__ % self.params
+ return _set_docstring(func, new_doc)
def update(self, *args, **kwargs):
"""
@@ -290,6 +290,12 @@ def my_dog(has='fleas'):
"""
def __init__(self, addendum, join='', indents=0):
+ if callable(addendum):
+ # allow for passing @Appender(func) instead of
+ # @Appender(func.__doc__), both more succinct and helpful when
+ # -oo optimization strips docstrings
+ addendum = addendum.__doc__ or ''
+
if indents > 0:
self.addendum = indent(addendum, indents=indents)
else:
@@ -297,11 +303,41 @@ def __init__(self, addendum, join='', indents=0):
self.join = join
def __call__(self, func):
- func.__doc__ = func.__doc__ if func.__doc__ else ''
+ doc = func.__doc__ if func.__doc__ else ''
self.addendum = self.addendum if self.addendum else ''
- docitems = [func.__doc__, self.addendum]
- func.__doc__ = dedent(self.join.join(docitems))
- return func
+ docitems = [doc, self.addendum]
+ new_doc = dedent(self.join.join(docitems))
+
+ return _set_docstring(func, new_doc)
+
+
+def _set_docstring(obj, docstring):
+ """
+ Set the docstring for the given function or class
+
+ Parameters
+ ----------
+ obj : function, method, class
+ docstring : str
+
+ Returns
+ -------
+ same type as obj
+ """
+ if isinstance(obj, type):
+ # i.e. decorating a class, for which docstrings can not be edited
+
+ class Wrapped(obj):
+ __doc__ = docstring
+
+ Wrapped.__name__ = obj.__name__
+ Wrapped.__module__ = obj.__module__
+ # TODO: will this induce a perf penalty in MRO lookups?
+ return Wrapped
+
+ else:
+ obj.__doc__ = docstring
+ return obj
def indent(text, indents=1):
| Possibly addresses issues discussed in #20074? More generally, allows for less-verbose usage. | https://api.github.com/repos/pandas-dev/pandas/pulls/22927 | 2018-10-01T15:55:40Z | 2018-10-01T19:19:39Z | null | 2018-10-01T19:19:43Z |
TST/CLN: Fixturize tests/frame/test_block_internals.py | diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index 3fe1c84174acb..5f1d4954521ed 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -22,27 +22,25 @@
import pandas.util.testing as tm
-from pandas.tests.frame.common import TestData
-
# Segregated collection of methods that require the BlockManager internal data
# structure
-class TestDataFrameBlockInternals(TestData):
+class TestDataFrameBlockInternals():
- def test_cast_internals(self):
- casted = DataFrame(self.frame._data, dtype=int)
- expected = DataFrame(self.frame._series, dtype=int)
+ def test_cast_internals(self, float_frame):
+ casted = DataFrame(float_frame._data, dtype=int)
+ expected = DataFrame(float_frame._series, dtype=int)
assert_frame_equal(casted, expected)
- casted = DataFrame(self.frame._data, dtype=np.int32)
- expected = DataFrame(self.frame._series, dtype=np.int32)
+ casted = DataFrame(float_frame._data, dtype=np.int32)
+ expected = DataFrame(float_frame._series, dtype=np.int32)
assert_frame_equal(casted, expected)
- def test_consolidate(self):
- self.frame['E'] = 7.
- consolidated = self.frame._consolidate()
+ def test_consolidate(self, float_frame):
+ float_frame['E'] = 7.
+ consolidated = float_frame._consolidate()
assert len(consolidated._data.blocks) == 1
# Ensure copy, do I want this?
@@ -50,92 +48,92 @@ def test_consolidate(self):
assert recons is not consolidated
tm.assert_frame_equal(recons, consolidated)
- self.frame['F'] = 8.
- assert len(self.frame._data.blocks) == 3
+ float_frame['F'] = 8.
+ assert len(float_frame._data.blocks) == 3
- self.frame._consolidate(inplace=True)
- assert len(self.frame._data.blocks) == 1
+ float_frame._consolidate(inplace=True)
+ assert len(float_frame._data.blocks) == 1
- def test_consolidate_deprecation(self):
- self.frame['E'] = 7
+ def test_consolidate_deprecation(self, float_frame):
+ float_frame['E'] = 7
with tm.assert_produces_warning(FutureWarning):
- self.frame.consolidate()
+ float_frame.consolidate()
- def test_consolidate_inplace(self):
- frame = self.frame.copy() # noqa
+ def test_consolidate_inplace(self, float_frame):
+ frame = float_frame.copy() # noqa
# triggers in-place consolidation
for letter in range(ord('A'), ord('Z')):
- self.frame[chr(letter)] = chr(letter)
+ float_frame[chr(letter)] = chr(letter)
- def test_values_consolidate(self):
- self.frame['E'] = 7.
- assert not self.frame._data.is_consolidated()
- _ = self.frame.values # noqa
- assert self.frame._data.is_consolidated()
+ def test_values_consolidate(self, float_frame):
+ float_frame['E'] = 7.
+ assert not float_frame._data.is_consolidated()
+ _ = float_frame.values # noqa
+ assert float_frame._data.is_consolidated()
- def test_modify_values(self):
- self.frame.values[5] = 5
- assert (self.frame.values[5] == 5).all()
+ def test_modify_values(self, float_frame):
+ float_frame.values[5] = 5
+ assert (float_frame.values[5] == 5).all()
# unconsolidated
- self.frame['E'] = 7.
- self.frame.values[6] = 6
- assert (self.frame.values[6] == 6).all()
+ float_frame['E'] = 7.
+ float_frame.values[6] = 6
+ assert (float_frame.values[6] == 6).all()
- def test_boolean_set_uncons(self):
- self.frame['E'] = 7.
+ def test_boolean_set_uncons(self, float_frame):
+ float_frame['E'] = 7.
- expected = self.frame.values.copy()
+ expected = float_frame.values.copy()
expected[expected > 1] = 2
- self.frame[self.frame > 1] = 2
- assert_almost_equal(expected, self.frame.values)
+ float_frame[float_frame > 1] = 2
+ assert_almost_equal(expected, float_frame.values)
- def test_values_numeric_cols(self):
- self.frame['foo'] = 'bar'
+ def test_values_numeric_cols(self, float_frame):
+ float_frame['foo'] = 'bar'
- values = self.frame[['A', 'B', 'C', 'D']].values
+ values = float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
- def test_values_lcd(self):
+ def test_values_lcd(self, mixed_float_frame, mixed_int_frame):
# mixed lcd
- values = self.mixed_float[['A', 'B', 'C', 'D']].values
+ values = mixed_float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
- values = self.mixed_float[['A', 'B', 'C']].values
+ values = mixed_float_frame[['A', 'B', 'C']].values
assert values.dtype == np.float32
- values = self.mixed_float[['C']].values
+ values = mixed_float_frame[['C']].values
assert values.dtype == np.float16
# GH 10364
# B uint64 forces float because there are other signed int types
- values = self.mixed_int[['A', 'B', 'C', 'D']].values
+ values = mixed_int_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
- values = self.mixed_int[['A', 'D']].values
+ values = mixed_int_frame[['A', 'D']].values
assert values.dtype == np.int64
# B uint64 forces float because there are other signed int types
- values = self.mixed_int[['A', 'B', 'C']].values
+ values = mixed_int_frame[['A', 'B', 'C']].values
assert values.dtype == np.float64
# as B and C are both unsigned, no forcing to float is needed
- values = self.mixed_int[['B', 'C']].values
+ values = mixed_int_frame[['B', 'C']].values
assert values.dtype == np.uint64
- values = self.mixed_int[['A', 'C']].values
+ values = mixed_int_frame[['A', 'C']].values
assert values.dtype == np.int32
- values = self.mixed_int[['C', 'D']].values
+ values = mixed_int_frame[['C', 'D']].values
assert values.dtype == np.int64
- values = self.mixed_int[['A']].values
+ values = mixed_int_frame[['A']].values
assert values.dtype == np.int32
- values = self.mixed_int[['C']].values
+ values = mixed_int_frame[['C']].values
assert values.dtype == np.uint8
def test_constructor_with_convert(self):
@@ -205,7 +203,7 @@ def test_constructor_with_convert(self):
None], np.object_), name='A')
assert_series_equal(result, expected)
- def test_construction_with_mixed(self):
+ def test_construction_with_mixed(self, float_string_frame):
# test construction edge cases with mixed types
# f7u12, this does not work without extensive workaround
@@ -219,11 +217,11 @@ def test_construction_with_mixed(self):
expected = Series({'datetime64[ns]': 3})
# mixed-type frames
- self.mixed_frame['datetime'] = datetime.now()
- self.mixed_frame['timedelta'] = timedelta(days=1, seconds=1)
- assert self.mixed_frame['datetime'].dtype == 'M8[ns]'
- assert self.mixed_frame['timedelta'].dtype == 'm8[ns]'
- result = self.mixed_frame.get_dtype_counts().sort_values()
+ float_string_frame['datetime'] = datetime.now()
+ float_string_frame['timedelta'] = timedelta(days=1, seconds=1)
+ assert float_string_frame['datetime'].dtype == 'M8[ns]'
+ assert float_string_frame['timedelta'].dtype == 'm8[ns]'
+ result = float_string_frame.get_dtype_counts().sort_values()
expected = Series({'float64': 4,
'object': 1,
'datetime64[ns]': 1,
@@ -296,9 +294,9 @@ def test_equals_different_blocks(self):
assert df0.equals(df1)
assert df1.equals(df0)
- def test_copy_blocks(self):
+ def test_copy_blocks(self, float_frame):
# API/ENH 9607
- df = DataFrame(self.frame, copy=True)
+ df = DataFrame(float_frame, copy=True)
column = df.columns[0]
# use the default copy=True, change a column
@@ -314,9 +312,9 @@ def test_copy_blocks(self):
# make sure we did not change the original DataFrame
assert not _df[column].equals(df[column])
- def test_no_copy_blocks(self):
+ def test_no_copy_blocks(self, float_frame):
# API/ENH 9607
- df = DataFrame(self.frame, copy=True)
+ df = DataFrame(float_frame, copy=True)
column = df.columns[0]
# use the copy=False, change a column
@@ -332,29 +330,29 @@ def test_no_copy_blocks(self):
# make sure we did change the original DataFrame
assert _df[column].equals(df[column])
- def test_copy(self):
- cop = self.frame.copy()
+ def test_copy(self, float_frame, float_string_frame):
+ cop = float_frame.copy()
cop['E'] = cop['A']
- assert 'E' not in self.frame
+ assert 'E' not in float_frame
# copy objects
- copy = self.mixed_frame.copy()
- assert copy._data is not self.mixed_frame._data
+ copy = float_string_frame.copy()
+ assert copy._data is not float_string_frame._data
- def test_pickle(self):
- unpickled = tm.round_trip_pickle(self.mixed_frame)
- assert_frame_equal(self.mixed_frame, unpickled)
+ def test_pickle(self, float_string_frame, empty_frame, timezone_frame):
+ unpickled = tm.round_trip_pickle(float_string_frame)
+ assert_frame_equal(float_string_frame, unpickled)
# buglet
- self.mixed_frame._data.ndim
+ float_string_frame._data.ndim
# empty
- unpickled = tm.round_trip_pickle(self.empty)
+ unpickled = tm.round_trip_pickle(empty_frame)
repr(unpickled)
# tz frame
- unpickled = tm.round_trip_pickle(self.tzframe)
- assert_frame_equal(self.tzframe, unpickled)
+ unpickled = tm.round_trip_pickle(timezone_frame)
+ assert_frame_equal(timezone_frame, unpickled)
def test_consolidate_datetime64(self):
# numpy vstack bug
@@ -388,9 +386,9 @@ def test_consolidate_datetime64(self):
df.starting), ser_starting.index)
tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index)
- def test_is_mixed_type(self):
- assert not self.frame._is_mixed_type
- assert self.mixed_frame._is_mixed_type
+ def test_is_mixed_type(self, float_frame, float_string_frame):
+ assert not float_frame._is_mixed_type
+ assert float_string_frame._is_mixed_type
def test_get_numeric_data(self):
# TODO(wesm): unused?
@@ -448,23 +446,23 @@ def test_get_numeric_data_extension_dtype(self):
expected = df.loc[:, ['A', 'C']]
assert_frame_equal(result, expected)
- def test_convert_objects(self):
+ def test_convert_objects(self, float_string_frame):
- oops = self.mixed_frame.T.T
+ oops = float_string_frame.T.T
converted = oops._convert(datetime=True)
- assert_frame_equal(converted, self.mixed_frame)
+ assert_frame_equal(converted, float_string_frame)
assert converted['A'].dtype == np.float64
# force numeric conversion
- self.mixed_frame['H'] = '1.'
- self.mixed_frame['I'] = '1'
+ float_string_frame['H'] = '1.'
+ float_string_frame['I'] = '1'
# add in some items that will be nan
- length = len(self.mixed_frame)
- self.mixed_frame['J'] = '1.'
- self.mixed_frame['K'] = '1'
- self.mixed_frame.loc[0:5, ['J', 'K']] = 'garbled'
- converted = self.mixed_frame._convert(datetime=True, numeric=True)
+ length = len(float_string_frame)
+ float_string_frame['J'] = '1.'
+ float_string_frame['K'] = '1'
+ float_string_frame.loc[0:5, ['J', 'K']] = 'garbled'
+ converted = float_string_frame._convert(datetime=True, numeric=True)
assert converted['H'].dtype == 'float64'
assert converted['I'].dtype == 'int64'
assert converted['J'].dtype == 'float64'
@@ -473,14 +471,14 @@ def test_convert_objects(self):
assert len(converted['K'].dropna()) == length - 5
# via astype
- converted = self.mixed_frame.copy()
+ converted = float_string_frame.copy()
converted['H'] = converted['H'].astype('float64')
converted['I'] = converted['I'].astype('int64')
assert converted['H'].dtype == 'float64'
assert converted['I'].dtype == 'int64'
# via astype, but errors
- converted = self.mixed_frame.copy()
+ converted = float_string_frame.copy()
with tm.assert_raises_regex(ValueError, 'invalid literal'):
converted['H'].astype('int32')
| - [ ] Following h-vetinari's footstep (#22471 ), "translate" one of the test file
- [ ] tests added / passed
| https://api.github.com/repos/pandas-dev/pandas/pulls/22926 | 2018-10-01T14:38:40Z | 2018-10-14T17:18:26Z | 2018-10-14T17:18:26Z | 2018-10-14T17:18:40Z |
Updated README.md | diff --git a/README.md b/README.md
index f26b9598bb5d3..66771e5d6a00e 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
<div align="center">
- <img src="https://github.com/pandas-dev/pandas/blob/master/doc/logo/pandas_logo.png"><br>
+<img src="https://github.com/pandas-dev/pandas/blob/master/doc/logo/pandas_logo.png"><br>
</div>
-----------------
@@ -103,7 +103,7 @@ its way toward this goal.
Here are just a few of the things that pandas does well:
- Easy handling of [**missing data**][missing-data] (represented as
- `NaN`) in floating point as well as non-floating point data
+ `NaN`- 'Not a Number') in floating point as well as non-floating point data
- Size mutability: columns can be [**inserted and
deleted**][insertion-deletion] from DataFrame and higher dimensional
objects
| Removed extra white spaces and clarified that NAN stands for Not a Number.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22925 | 2018-10-01T14:16:06Z | 2018-10-01T20:32:48Z | null | 2018-10-01T20:32:48Z |
DOC GH22893 Fix docstring of groupby in pandas/core/generic.py | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 393e7caae5fab..5b4ce5a382324 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -7063,8 +7063,12 @@ def clip_lower(self, threshold, axis=None, inplace=False):
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
group_keys=True, squeeze=False, observed=False, **kwargs):
"""
- Group series using mapper (dict or key function, apply given function
- to group, return result as series) or by a series of columns.
+ Group DataFrame or Series using a mapper or by a Series of columns.
+
+ A groupby operation involves some combination of splitting the
+ object, applying a function, and combining the results. This can be
+ used to group large amounts of data and compute operations on these
+ groups.
Parameters
----------
@@ -7077,54 +7081,95 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
values are used as-is determine the groups. A label or list of
labels may be passed to group by the columns in ``self``. Notice
that a tuple is interpreted a (single) key.
- axis : int, default 0
+ axis : {0 or 'index', 1 or 'columns'}, default 0
+ Split along rows (0) or columns (1).
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
- level or levels
- as_index : boolean, default True
+ level or levels.
+ as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
- effectively "SQL-style" grouped output
- sort : boolean, default True
+ effectively "SQL-style" grouped output.
+ sort : bool, default True
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
- group. groupby preserves the order of rows within each group.
- group_keys : boolean, default True
- When calling apply, add group keys to index to identify pieces
- squeeze : boolean, default False
- reduce the dimensionality of the return type if possible,
- otherwise return a consistent type
- observed : boolean, default False
- This only applies if any of the groupers are Categoricals
+ group. Groupby preserves the order of rows within each group.
+ group_keys : bool, default True
+ When calling apply, add group keys to index to identify pieces.
+ squeeze : bool, default False
+ Reduce the dimensionality of the return type if possible,
+ otherwise return a consistent type.
+ observed : bool, default False
+ This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionadded:: 0.23.0
+ **kwargs
+ Optional, only accepts keyword argument 'mutated' and is passed
+ to groupby.
+
Returns
-------
- GroupBy object
+ DataFrameGroupBy or SeriesGroupBy
+ Depends on the calling object and returns groupby object that
+ contains information about the groups.
- Examples
+ See Also
--------
- DataFrame results
-
- >>> data.groupby(func, axis=0).mean()
- >>> data.groupby(['col1', 'col2'])['col3'].mean()
-
- DataFrame with hierarchical index
-
- >>> data.groupby(['col1', 'col2']).mean()
+ resample : Convenience method for frequency conversion and resampling
+ of time series.
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.
- See also
+ Examples
--------
- resample : Convenience method for frequency conversion and resampling
- of time series.
+ >>> df = pd.DataFrame({'Animal' : ['Falcon', 'Falcon',
+ ... 'Parrot', 'Parrot'],
+ ... 'Max Speed' : [380., 370., 24., 26.]})
+ >>> df
+ Animal Max Speed
+ 0 Falcon 380.0
+ 1 Falcon 370.0
+ 2 Parrot 24.0
+ 3 Parrot 26.0
+ >>> df.groupby(['Animal']).mean()
+ Max Speed
+ Animal
+ Falcon 375.0
+ Parrot 25.0
+
+ **Hierarchical Indexes**
+
+ We can groupby different levels of a hierarchical index
+ using the `level` parameter:
+
+ >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
+ ... ['Capitve', 'Wild', 'Capitve', 'Wild']]
+ >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
+ >>> df = pd.DataFrame({'Max Speed' : [390., 350., 30., 20.]},
+ ... index=index)
+ >>> df
+ Max Speed
+ Animal Type
+ Falcon Capitve 390.0
+ Wild 350.0
+ Parrot Capitve 30.0
+ Wild 20.0
+ >>> df.groupby(level=0).mean()
+ Max Speed
+ Animal
+ Falcon 370.0
+ Parrot 25.0
+ >>> df.groupby(level=1).mean()
+ Max Speed
+ Type
+ Capitve 210.0
+ Wild 185.0
"""
from pandas.core.groupby.groupby import groupby
| - [x] closes #22893
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Updated docstring following https://pandas.pydata.org/pandas-docs/stable/contributing_docstring.html
I changed the examples into similar ones that I thought better demonstrated the function, but I'm willing to change them again if they aren't up to par.
I also might need some help describing the kwargs. It only accepts 'mutated' as a keyword argument and I can't tell if it does anything. Apologies if the answer is obvious!
| https://api.github.com/repos/pandas-dev/pandas/pulls/22920 | 2018-10-01T07:18:21Z | 2018-10-03T00:01:09Z | 2018-10-03T00:01:08Z | 2018-10-03T00:01:15Z |
CLN GH22874 replace bare excepts in pandas/io/pytables.py | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index c57b1c3e211f6..fc9e415ed38f7 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -258,7 +258,7 @@ def _tables():
try:
_table_file_open_policy_is_strict = (
tables.file._FILE_OPEN_POLICY == 'strict')
- except:
+ except AttributeError:
pass
return _table_mod
@@ -395,11 +395,11 @@ def read_hdf(path_or_buf, key=None, mode='r', **kwargs):
'contains multiple datasets.')
key = candidate_only_group._v_pathname
return store.select(key, auto_close=auto_close, **kwargs)
- except:
+ except (ValueError, TypeError):
# if there is an error, close the store
try:
store.close()
- except:
+ except AttributeError:
pass
raise
@@ -517,7 +517,7 @@ def __getattr__(self, name):
""" allow attribute access to get stores """
try:
return self.get(name)
- except:
+ except (KeyError, ClosedFileError):
pass
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, name))
@@ -675,7 +675,7 @@ def flush(self, fsync=False):
if fsync:
try:
os.fsync(self._handle.fileno())
- except:
+ except OSError:
pass
def get(self, key):
@@ -1161,7 +1161,7 @@ def get_node(self, key):
if not key.startswith('/'):
key = '/' + key
return self._handle.get_node(self.root, key)
- except:
+ except _table_mod.exceptions.NoSuchNodeError:
return None
def get_storer(self, key):
@@ -1270,7 +1270,7 @@ def _validate_format(self, format, kwargs):
# validate
try:
kwargs['format'] = _FORMAT_MAP[format.lower()]
- except:
+ except KeyError:
raise TypeError("invalid HDFStore format specified [{0}]"
.format(format))
@@ -1307,7 +1307,7 @@ def error(t):
try:
pt = _TYPE_MAP[type(value)]
- except:
+ except KeyError:
error('_TYPE_MAP')
# we are actually a table
@@ -1318,7 +1318,7 @@ def error(t):
if u('table') not in pt:
try:
return globals()[_STORER_MAP[pt]](self, group, **kwargs)
- except:
+ except KeyError:
error('_STORER_MAP')
# existing node (and must be a table)
@@ -1354,12 +1354,12 @@ def error(t):
fields = group.table._v_attrs.fields
if len(fields) == 1 and fields[0] == u('value'):
tt = u('legacy_frame')
- except:
+ except IndexError:
pass
try:
return globals()[_TABLE_MAP[tt]](self, group, **kwargs)
- except:
+ except KeyError:
error('_TABLE_MAP')
def _write_to_group(self, key, value, format, index=True, append=False,
@@ -1624,7 +1624,7 @@ def is_indexed(self):
""" return whether I am an indexed column """
try:
return getattr(self.table.cols, self.cname).is_indexed
- except:
+ except AttributeError:
False
def copy(self):
@@ -1654,9 +1654,10 @@ def convert(self, values, nan_rep, encoding, errors):
kwargs['freq'] = _ensure_decoded(self.freq)
if self.index_name is not None:
kwargs['name'] = _ensure_decoded(self.index_name)
+ # making an Index instance could throw a number of different errors
try:
self.values = Index(values, **kwargs)
- except:
+ except Exception: # noqa: E722
# if the output freq is different that what we recorded,
# it should be None (see also 'doc example part 2')
@@ -1869,7 +1870,7 @@ def create_for_block(
m = re.search(r"values_block_(\d+)", name)
if m:
name = "values_%s" % m.groups()[0]
- except:
+ except IndexError:
pass
return cls(name=name, cname=cname, **kwargs)
@@ -2232,7 +2233,7 @@ def convert(self, values, nan_rep, encoding, errors):
try:
self.data = self.data.astype(dtype, copy=False)
- except:
+ except TypeError:
self.data = self.data.astype('O', copy=False)
# convert nans / decode
@@ -2325,7 +2326,7 @@ def set_version(self):
self.version = tuple(int(x) for x in version.split('.'))
if len(self.version) == 2:
self.version = self.version + (0,)
- except:
+ except AttributeError:
self.version = (0, 0, 0)
@property
@@ -2769,7 +2770,7 @@ def write_array(self, key, value, items=None):
else:
try:
items = list(items)
- except:
+ except TypeError:
pass
ws = performance_doc % (inferred_type, key, items)
warnings.warn(ws, PerformanceWarning, stacklevel=7)
@@ -2843,7 +2844,7 @@ class SeriesFixed(GenericFixed):
def shape(self):
try:
return len(getattr(self.group, 'values')),
- except:
+ except (TypeError, AttributeError):
return None
def read(self, **kwargs):
@@ -2961,7 +2962,7 @@ def shape(self):
shape = shape[::-1]
return shape
- except:
+ except AttributeError:
return None
def read(self, start=None, stop=None, **kwargs):
@@ -3495,7 +3496,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None,
if axes is None:
try:
axes = _AXES_MAP[type(obj)]
- except:
+ except KeyError:
raise TypeError("cannot properly create the storer for: "
"[group->%s,value->%s]"
% (self.group._v_name, type(obj)))
@@ -3614,7 +3615,7 @@ def get_blk_items(mgr, blocks):
b, b_items = by_items.pop(items)
new_blocks.append(b)
new_blk_items.append(b_items)
- except:
+ except (IndexError, KeyError):
raise ValueError(
"cannot match existing table structure for [%s] on "
"appending data" % ','.join(pprint_thing(item) for
@@ -3642,7 +3643,7 @@ def get_blk_items(mgr, blocks):
if existing_table is not None and validate:
try:
existing_col = existing_table.values_axes[i]
- except:
+ except (IndexError, KeyError):
raise ValueError("Incompatible appended table [%s] with "
"existing table [%s]"
% (blocks, existing_table.values_axes))
@@ -4460,7 +4461,7 @@ def _get_info(info, name):
""" get/create the info for this name """
try:
idx = info[name]
- except:
+ except KeyError:
idx = info[name] = dict()
return idx
@@ -4782,7 +4783,7 @@ def __init__(self, table, where=None, start=None, stop=None, **kwargs):
)
self.coordinates = where
- except:
+ except ValueError:
pass
if self.coordinates is None:
| This PR changes bare excepts in `pandas/io/pytables.py` to more specific exceptions. I defaulted to using specific exceptions that seemed likely/triggered test errors, but it may be the case that I'm missing some or it's better to leave it as `Exception`.
- [x] closes #22874
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22919 | 2018-10-01T01:57:14Z | 2018-10-02T21:14:49Z | 2018-10-02T21:14:49Z | 2018-10-02T22:35:49Z |
BUG: to_datetime preserves name of Index argument in the result | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index b71edcf1f6f51..851c1a3fbd6e9 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -655,6 +655,7 @@ Datetimelike
- Bug in :class:`DatetimeIndex` incorrectly allowing indexing with ``Timedelta`` object (:issue:`20464`)
- Bug in :class:`DatetimeIndex` where frequency was being set if original frequency was ``None`` (:issue:`22150`)
- Bug in rounding methods of :class:`DatetimeIndex` (:meth:`~DatetimeIndex.round`, :meth:`~DatetimeIndex.ceil`, :meth:`~DatetimeIndex.floor`) and :class:`Timestamp` (:meth:`~Timestamp.round`, :meth:`~Timestamp.ceil`, :meth:`~Timestamp.floor`) could give rise to loss of precision (:issue:`22591`)
+- Bug in :func:`to_datetime` with an :class:`Index` argument that would drop the ``name`` from the result (:issue:`21697`)
Timedelta
^^^^^^^^^
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 57387b9ea870a..4a5290a90313d 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -99,13 +99,13 @@ def _convert_and_box_cache(arg, cache_array, box, errors, name=None):
result = Series(arg).map(cache_array)
if box:
if errors == 'ignore':
- return Index(result)
+ return Index(result, name=name)
else:
return DatetimeIndex(result, name=name)
return result.values
-def _return_parsed_timezone_results(result, timezones, box, tz):
+def _return_parsed_timezone_results(result, timezones, box, tz, name):
"""
Return results from array_strptime if a %z or %Z directive was passed.
@@ -119,6 +119,9 @@ def _return_parsed_timezone_results(result, timezones, box, tz):
True boxes result as an Index-like, False returns an ndarray
tz : object
None or pytz timezone object
+ name : string, default None
+ Name for a DatetimeIndex
+
Returns
-------
tz_result : ndarray of parsed dates with timezone
@@ -136,7 +139,7 @@ def _return_parsed_timezone_results(result, timezones, box, tz):
in zip(result, timezones)])
if box:
from pandas import Index
- return Index(tz_results)
+ return Index(tz_results, name=name)
return tz_results
@@ -209,7 +212,7 @@ def _convert_listlike_datetimes(arg, box, format, name=None, tz=None,
if box:
if errors == 'ignore':
from pandas import Index
- return Index(result)
+ return Index(result, name=name)
return DatetimeIndex(result, tz=tz, name=name)
return result
@@ -252,7 +255,7 @@ def _convert_listlike_datetimes(arg, box, format, name=None, tz=None,
arg, format, exact=exact, errors=errors)
if '%Z' in format or '%z' in format:
return _return_parsed_timezone_results(
- result, timezones, box, tz)
+ result, timezones, box, tz, name)
except tslibs.OutOfBoundsDatetime:
if errors == 'raise':
raise
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index cc6db8f5854c8..3b7d6a709230b 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -233,6 +233,15 @@ def test_to_datetime_parse_timezone_malformed(self, offset):
with pytest.raises(ValueError):
pd.to_datetime([date], format=fmt)
+ def test_to_datetime_parse_timezone_keeps_name(self):
+ # GH 21697
+ fmt = '%Y-%m-%d %H:%M:%S %z'
+ arg = pd.Index(['2010-01-01 12:00:00 Z'], name='foo')
+ result = pd.to_datetime(arg, format=fmt)
+ expected = pd.DatetimeIndex(['2010-01-01 12:00:00'], tz='UTC',
+ name='foo')
+ tm.assert_index_equal(result, expected)
+
class TestToDatetime(object):
def test_to_datetime_pydatetime(self):
@@ -765,6 +774,14 @@ def test_unit_rounding(self, cache):
expected = pd.Timestamp('2015-06-19 19:55:31.877000093')
assert result == expected
+ @pytest.mark.parametrize('cache', [True, False])
+ def test_unit_ignore_keeps_name(self, cache):
+ # GH 21697
+ expected = pd.Index([15e9] * 2, name='name')
+ result = pd.to_datetime(expected, errors='ignore', box=True, unit='s',
+ cache=cache)
+ tm.assert_index_equal(result, expected)
+
@pytest.mark.parametrize('cache', [True, False])
def test_dataframe(self, cache):
| - [x] closes #21697
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22918 | 2018-10-01T01:47:37Z | 2018-10-01T21:22:21Z | 2018-10-01T21:22:21Z | 2018-10-01T21:22:24Z |
Use fused types for _take_2d | diff --git a/pandas/_libs/algos_common_helper.pxi.in b/pandas/_libs/algos_common_helper.pxi.in
index 40b1b1a282670..9f531f36d1a64 100644
--- a/pandas/_libs/algos_common_helper.pxi.in
+++ b/pandas/_libs/algos_common_helper.pxi.in
@@ -2,7 +2,6 @@
Template for each `dtype` helper function using 1-d template
# 1-d template
-- map_indices
- pad
- pad_1d
- pad_2d
diff --git a/pandas/_libs/algos_rank_helper.pxi.in b/pandas/_libs/algos_rank_helper.pxi.in
index b2551f3733904..130276ae0e73c 100644
--- a/pandas/_libs/algos_rank_helper.pxi.in
+++ b/pandas/_libs/algos_rank_helper.pxi.in
@@ -24,17 +24,8 @@ dtypes = [('object', 'object', 'Infinity()', 'NegInfinity()'),
@cython.wraparound(False)
@cython.boundscheck(False)
-{{if dtype == 'object'}}
-
-
def rank_1d_{{dtype}}(object in_arr, ties_method='average',
ascending=True, na_option='keep', pct=False):
-{{else}}
-
-
-def rank_1d_{{dtype}}(object in_arr, ties_method='average', ascending=True,
- na_option='keep', pct=False):
-{{endif}}
"""
Fast NaN-friendly version of scipy.stats.rankdata
"""
diff --git a/pandas/_libs/algos_take_helper.pxi.in b/pandas/_libs/algos_take_helper.pxi.in
index 0e69324acd341..358479c837d05 100644
--- a/pandas/_libs/algos_take_helper.pxi.in
+++ b/pandas/_libs/algos_take_helper.pxi.in
@@ -260,33 +260,39 @@ def take_2d_multi_{{name}}_{{dest}}(ndarray[{{c_type_in}}, ndim=2] values,
{{endfor}}
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
# take_2d internal function
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
-{{py:
-
-# dtype, ctype, init_result
-dtypes = [('float64', 'float64_t', 'np.empty_like(values)'),
- ('uint64', 'uint64_t', 'np.empty_like(values)'),
- ('object', 'object', 'values.copy()'),
- ('int64', 'int64_t', 'np.empty_like(values)')]
-}}
+ctypedef fused take_t:
+ float64_t
+ uint64_t
+ int64_t
+ object
-{{for dtype, ctype, init_result in dtypes}}
-cdef _take_2d_{{dtype}}(ndarray[{{ctype}}, ndim=2] values, object idx):
+cdef _take_2d(ndarray[take_t, ndim=2] values, object idx):
cdef:
Py_ssize_t i, j, N, K
ndarray[Py_ssize_t, ndim=2, cast=True] indexer = idx
- ndarray[{{ctype}}, ndim=2] result
+ ndarray[take_t, ndim=2] result
object val
N, K = (<object> values).shape
- result = {{init_result}}
+
+ if take_t is object:
+ # evaluated at compile-time
+ result = values.copy()
+ else:
+ result = np.empty_like(values)
+
for i in range(N):
for j in range(K):
result[i, j] = values[i, indexer[i, j]]
return result
-{{endfor}}
+
+_take_2d_object = _take_2d[object]
+_take_2d_float64 = _take_2d[float64_t]
+_take_2d_int64 = _take_2d[int64_t]
+_take_2d_uint64 = _take_2d[uint64_t]
diff --git a/pandas/_libs/join_func_helper.pxi.in b/pandas/_libs/join_func_helper.pxi.in
index 73d231b8588dc..a72b113a6fdb6 100644
--- a/pandas/_libs/join_func_helper.pxi.in
+++ b/pandas/_libs/join_func_helper.pxi.in
@@ -68,21 +68,21 @@ def asof_join_backward_{{on_dtype}}_by_{{by_dtype}}(
# find last position in right whose value is less than left's
if allow_exact_matches:
- while right_pos < right_size and\
- right_values[right_pos] <= left_values[left_pos]:
+ while (right_pos < right_size and
+ right_values[right_pos] <= left_values[left_pos]):
hash_table.set_item(right_by_values[right_pos], right_pos)
right_pos += 1
else:
- while right_pos < right_size and\
- right_values[right_pos] < left_values[left_pos]:
+ while (right_pos < right_size and
+ right_values[right_pos] < left_values[left_pos]):
hash_table.set_item(right_by_values[right_pos], right_pos)
right_pos += 1
right_pos -= 1
# save positions as the desired index
by_value = left_by_values[left_pos]
- found_right_pos = hash_table.get_item(by_value)\
- if by_value in hash_table else -1
+ found_right_pos = (hash_table.get_item(by_value)
+ if by_value in hash_table else -1)
left_indexer[left_pos] = left_pos
right_indexer[left_pos] = found_right_pos
@@ -133,21 +133,21 @@ def asof_join_forward_{{on_dtype}}_by_{{by_dtype}}(
# find first position in right whose value is greater than left's
if allow_exact_matches:
- while right_pos >= 0 and\
- right_values[right_pos] >= left_values[left_pos]:
+ while (right_pos >= 0 and
+ right_values[right_pos] >= left_values[left_pos]):
hash_table.set_item(right_by_values[right_pos], right_pos)
right_pos -= 1
else:
- while right_pos >= 0 and\
- right_values[right_pos] > left_values[left_pos]:
+ while (right_pos >= 0 and
+ right_values[right_pos] > left_values[left_pos]):
hash_table.set_item(right_by_values[right_pos], right_pos)
right_pos -= 1
right_pos += 1
# save positions as the desired index
by_value = left_by_values[left_pos]
- found_right_pos = hash_table.get_item(by_value)\
- if by_value in hash_table else -1
+ found_right_pos = (hash_table.get_item(by_value)
+ if by_value in hash_table else -1)
left_indexer[left_pos] = left_pos
right_indexer[left_pos] = found_right_pos
@@ -259,12 +259,12 @@ def asof_join_backward_{{on_dtype}}(
# find last position in right whose value is less than left's
if allow_exact_matches:
- while right_pos < right_size and\
- right_values[right_pos] <= left_values[left_pos]:
+ while (right_pos < right_size and
+ right_values[right_pos] <= left_values[left_pos]):
right_pos += 1
else:
- while right_pos < right_size and\
- right_values[right_pos] < left_values[left_pos]:
+ while (right_pos < right_size and
+ right_values[right_pos] < left_values[left_pos]):
right_pos += 1
right_pos -= 1
@@ -313,19 +313,19 @@ def asof_join_forward_{{on_dtype}}(
# find first position in right whose value is greater than left's
if allow_exact_matches:
- while right_pos >= 0 and\
- right_values[right_pos] >= left_values[left_pos]:
+ while (right_pos >= 0 and
+ right_values[right_pos] >= left_values[left_pos]):
right_pos -= 1
else:
- while right_pos >= 0 and\
- right_values[right_pos] > left_values[left_pos]:
+ while (right_pos >= 0 and
+ right_values[right_pos] > left_values[left_pos]):
right_pos -= 1
right_pos += 1
# save positions as the desired index
left_indexer[left_pos] = left_pos
- right_indexer[left_pos] = right_pos\
- if right_pos != right_size else -1
+ right_indexer[left_pos] = (right_pos
+ if right_pos != right_size else -1)
# if needed, verify that tolerance is met
if has_tolerance and right_pos != right_size:
diff --git a/pandas/_libs/join_helper.pxi.in b/pandas/_libs/join_helper.pxi.in
index feb8cfb76a7f0..6ba587a5b04ea 100644
--- a/pandas/_libs/join_helper.pxi.in
+++ b/pandas/_libs/join_helper.pxi.in
@@ -4,42 +4,30 @@ Template for each `dtype` helper function for join
WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
"""
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
# left_join_indexer, inner_join_indexer, outer_join_indexer
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
-{{py:
-
-# name, c_type, dtype
-dtypes = [('float64', 'float64_t', 'np.float64'),
- ('float32', 'float32_t', 'np.float32'),
- ('object', 'object', 'object'),
- ('int32', 'int32_t', 'np.int32'),
- ('int64', 'int64_t', 'np.int64'),
- ('uint64', 'uint64_t', 'np.uint64')]
-
-def get_dispatch(dtypes):
-
- for name, c_type, dtype in dtypes:
- yield name, c_type, dtype
-
-}}
+ctypedef fused join_t:
+ float64_t
+ float32_t
+ object
+ int32_t
+ int64_t
+ uint64_t
-{{for name, c_type, dtype in get_dispatch(dtypes)}}
# Joins on ordered, unique indices
# right might contain non-unique values
-
@cython.wraparound(False)
@cython.boundscheck(False)
-def left_join_indexer_unique_{{name}}(ndarray[{{c_type}}] left,
- ndarray[{{c_type}}] right):
+def left_join_indexer_unique(ndarray[join_t] left, ndarray[join_t] right):
cdef:
Py_ssize_t i, j, nleft, nright
ndarray[int64_t] indexer
- {{c_type}} lval, rval
+ join_t lval, rval
i = 0
j = 0
@@ -78,6 +66,33 @@ def left_join_indexer_unique_{{name}}(ndarray[{{c_type}}] left,
return indexer
+left_join_indexer_unique_float64 = left_join_indexer_unique["float64_t"]
+left_join_indexer_unique_float32 = left_join_indexer_unique["float32_t"]
+left_join_indexer_unique_object = left_join_indexer_unique["object"]
+left_join_indexer_unique_int32 = left_join_indexer_unique["int32_t"]
+left_join_indexer_unique_int64 = left_join_indexer_unique["int64_t"]
+left_join_indexer_unique_uint64 = left_join_indexer_unique["uint64_t"]
+
+
+{{py:
+
+# name, c_type, dtype
+dtypes = [('float64', 'float64_t', 'np.float64'),
+ ('float32', 'float32_t', 'np.float32'),
+ ('object', 'object', 'object'),
+ ('int32', 'int32_t', 'np.int32'),
+ ('int64', 'int64_t', 'np.int64'),
+ ('uint64', 'uint64_t', 'np.uint64')]
+
+def get_dispatch(dtypes):
+
+ for name, c_type, dtype in dtypes:
+ yield name, c_type, dtype
+
+}}
+
+{{for name, c_type, dtype in get_dispatch(dtypes)}}
+
# @cython.wraparound(False)
# @cython.boundscheck(False)
def left_join_indexer_{{name}}(ndarray[{{c_type}}] left,
| Misc cleanup
For some of the cython code I get unexplained test failures when using fused types. Vaguely hoping to narrow down the set of affected functions so as to make a helpful bug report to cython. | https://api.github.com/repos/pandas-dev/pandas/pulls/22917 | 2018-10-01T01:29:52Z | 2018-10-05T12:18:41Z | 2018-10-05T12:18:41Z | 2018-10-05T13:38:45Z |
CLN GH22875 Replace bare excepts by explicit excepts in pandas/io/ | diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py
index 0d564069c681f..70c978a3b62ed 100644
--- a/pandas/io/clipboards.py
+++ b/pandas/io/clipboards.py
@@ -42,7 +42,7 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover
text, encoding=(kwargs.get('encoding') or
get_option('display.encoding'))
)
- except:
+ except AttributeError:
pass
# Excel copies into clipboard with \t separation
diff --git a/pandas/io/formats/console.py b/pandas/io/formats/console.py
index b8b28a0b0c98c..e347f6bce0168 100644
--- a/pandas/io/formats/console.py
+++ b/pandas/io/formats/console.py
@@ -100,7 +100,7 @@ def check_main():
try:
return __IPYTHON__ or check_main() # noqa
- except:
+ except NameError:
return check_main()
@@ -118,7 +118,7 @@ def in_qtconsole():
ip.config.get('IPKernelApp', {}).get('parent_appname', ""))
if 'qtconsole' in front_end.lower():
return True
- except:
+ except NameError:
return False
return False
@@ -137,7 +137,7 @@ def in_ipnb():
ip.config.get('IPKernelApp', {}).get('parent_appname', ""))
if 'notebook' in front_end.lower():
return True
- except:
+ except NameError:
return False
return False
@@ -149,7 +149,7 @@ def in_ipython_frontend():
try:
ip = get_ipython() # noqa
return 'zmq' in str(type(ip)).lower()
- except:
+ except NameError:
pass
return False
diff --git a/pandas/io/formats/terminal.py b/pandas/io/formats/terminal.py
index dcd6f2cf4a718..2846525adbe6b 100644
--- a/pandas/io/formats/terminal.py
+++ b/pandas/io/formats/terminal.py
@@ -78,7 +78,7 @@ def _get_terminal_size_windows():
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
- except:
+ except (AttributeError, ValueError):
return None
if res:
import struct
@@ -108,7 +108,7 @@ def _get_terminal_size_tput():
output = proc.communicate(input=None)
rows = int(output[0])
return (cols, rows)
- except:
+ except OSError:
return None
@@ -120,7 +120,7 @@ def ioctl_GWINSZ(fd):
import struct
cr = struct.unpack(
'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
- except:
+ except (struct.error, IOError):
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
@@ -129,13 +129,13 @@ def ioctl_GWINSZ(fd):
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
- except:
+ except OSError:
pass
if not cr or cr == (0, 0):
try:
from os import environ as env
cr = (env['LINES'], env['COLUMNS'])
- except:
+ except (ValueError, KeyError):
return None
return int(cr[1]), int(cr[0])
diff --git a/pandas/io/packers.py b/pandas/io/packers.py
index 7a1e72637f4ce..77fbb27d01f86 100644
--- a/pandas/io/packers.py
+++ b/pandas/io/packers.py
@@ -194,7 +194,7 @@ def read(fh):
if should_close:
try:
path_or_buf.close()
- except: # noqa: flake8
+ except IOError:
pass
return l
@@ -703,7 +703,7 @@ def create_block(b):
dtype = dtype_for(obj[u'dtype'])
try:
return dtype(obj[u'data'])
- except:
+ except (ValueError, TypeError):
return dtype.type(obj[u'data'])
elif typ == u'np_complex':
return complex(obj[u'real'] + u'+' + obj[u'imag'] + u'j')
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index a4f1155117b12..2def3b81c9518 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -459,7 +459,7 @@ def _read(filepath_or_buffer, kwds):
if should_close:
try:
filepath_or_buffer.close()
- except: # noqa: flake8
+ except ValueError:
pass
return data
@@ -1808,7 +1808,7 @@ def close(self):
# close additional handles opened by C parser (for compression)
try:
self._reader.close()
- except:
+ except ValueError:
pass
def _set_noconvert_columns(self):
@@ -3034,7 +3034,7 @@ def converter(*date_cols):
errors='ignore',
infer_datetime_format=infer_datetime_format
)
- except:
+ except ValueError:
return tools.to_datetime(
parsing.try_parse_dates(strs, dayfirst=dayfirst))
else:
@@ -3263,7 +3263,7 @@ def _floatify_na_values(na_values):
v = float(v)
if not np.isnan(v):
result.add(v)
- except:
+ except (TypeError, ValueError, OverflowError):
pass
return result
@@ -3284,11 +3284,11 @@ def _stringify_na_values(na_values):
result.append(str(v))
result.append(v)
- except:
+ except (TypeError, ValueError, OverflowError):
pass
try:
result.append(int(x))
- except:
+ except (TypeError, ValueError, OverflowError):
pass
return set(result)
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
index 9c219d7fd6997..3442401319ba8 100644
--- a/pandas/io/pickle.py
+++ b/pandas/io/pickle.py
@@ -165,16 +165,11 @@ def try_read(path, encoding=None):
return read_wrapper(lambda f: pkl.load(f))
except Exception:
# reg/patched pickle
- try:
- return read_wrapper(
- lambda f: pc.load(f, encoding=encoding, compat=False))
- # compat pickle
- except:
- return read_wrapper(
- lambda f: pc.load(f, encoding=encoding, compat=True))
+ return read_wrapper(
+ lambda f: pc.load(f, encoding=encoding, compat=False))
try:
return try_read(path)
- except:
+ except Exception:
if PY3:
return try_read(path, encoding='latin1')
raise
diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py
index 14e7ad9682db6..385396909a07b 100644
--- a/pandas/io/sas/sas_xport.py
+++ b/pandas/io/sas/sas_xport.py
@@ -246,7 +246,7 @@ def __init__(self, filepath_or_buffer, index=None, encoding='ISO-8859-1',
contents = filepath_or_buffer.read()
try:
contents = contents.encode(self._encoding)
- except:
+ except UnicodeEncodeError:
pass
self.filepath_or_buffer = compat.BytesIO(contents)
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index b8a0bf5733158..d72996a8e6157 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -46,7 +46,7 @@ def read_sas(filepath_or_buffer, format=None, index=None, encoding=None,
format = "sas7bdat"
else:
raise ValueError("unable to infer format of SAS file")
- except:
+ except ValueError:
pass
if format.lower() == 'xport':
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index a582d32741ae9..882fa0092b2cf 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -382,7 +382,7 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
try:
_is_table_name = pandas_sql.has_table(sql)
- except:
+ except (ImportError, AttributeError):
_is_table_name = False
if _is_table_name:
@@ -847,7 +847,7 @@ def _sqlalchemy_type(self, col):
try:
tz = col.tzinfo # noqa
return DateTime(timezone=True)
- except:
+ except AttributeError:
return DateTime
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
@@ -1360,7 +1360,7 @@ def run_transaction(self):
try:
yield cur
self.con.commit()
- except:
+ except Exception:
self.con.rollback()
raise
finally:
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index efd5f337fdf69..a321e315f5225 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -1252,12 +1252,12 @@ def _read_old_header(self, first_char):
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
- except:
+ except ValueError:
raise ValueError("cannot convert stata types [{0}]"
.format(','.join(str(x) for x in typlist)))
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
- except:
+ except ValueError:
raise ValueError("cannot convert stata dtypes [{0}]"
.format(','.join(str(x) for x in typlist)))
| - [X] closes #22875
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
I removed the except block in pandas/io/pickle.py:172:13 because the only difference between the try block and the except block was one parameter in the call to pc.load, and the parameter that was changing didn't look like it was being used by pc.load.
Original block:
```
try:
return read_wrapper(
lambda f: pc.load(f, encoding=encoding, compat=False))
# compat pickle
except:
return read_wrapper(
lambda f: pc.load(f, encoding=encoding, compat=True))
```
The function it was calling (in pandas/compat/pickle_compat.py):
```
def load(fh, encoding=None, compat=False, is_verbose=False):
"""load a pickle, with a provided encoding
if compat is True:
fake the old class hierarchy
if it works, then return the new type objects
Parameters
----------
fh: a filelike object
encoding: an optional encoding
compat: provide Series compatibility mode, boolean, default False
is_verbose: show exception output
"""
try:
fh.seek(0)
if encoding is not None:
up = Unpickler(fh, encoding=encoding)
else:
up = Unpickler(fh)
up.is_verbose = is_verbose
return up.load()
except:
raise
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/22916 | 2018-10-01T00:33:33Z | 2018-10-05T04:41:06Z | null | 2018-10-06T05:03:04Z |
[WIP] API/CLN: Refactor DataFrame.append | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 251bc6587872d..4db4239ea1b54 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -6338,45 +6338,152 @@ def append(self, other, ignore_index=False,
3 3
4 4
"""
- if isinstance(other, (Series, dict)):
- if isinstance(other, dict):
- other = Series(other)
- if other.name is None and not ignore_index:
+ kwargs = {
+ 'ignore_index': ignore_index,
+ 'verify_integrity': verify_integrity,
+ 'sort': sort,
+ }
+
+ obj_type = type(other)
+ kwargs['_obj_type'] = obj_type
+ if issubclass(obj_type, dict):
+ return self._append_dict(other, **kwargs)
+ elif issubclass(obj_type, Series):
+ return self._append_series(other, **kwargs)
+ elif issubclass(obj_type, DataFrame):
+ return self._append_frame(other, **kwargs)
+ elif issubclass(obj_type, list):
+
+ try:
+ item_type = type(other[0])
+ except IndexError: # empty list!
+ return self._append_list_of_frames(other, **kwargs)
+ if not all(isinstance(i, item_type) for i in other[1:]):
+ if issubclass(item_type, (dict, Series, DataFrame)):
+ raise TypeError("When other is a list, its elements must"
+ " be all of the same type")
+ else:
+ raise TypeError("The value of other must be a"
+ " DataFrame or Series/dict-like object,"
+ " or list of these")
+ kwargs['_item_type'] = item_type
+
+ if issubclass(item_type, dict):
+ return self._append_list_of_dicts(other, **kwargs)
+ elif issubclass(item_type, Series):
+ return self._append_list_of_series(other, **kwargs)
+ elif issubclass(item_type, DataFrame):
+ return self._append_list_of_frames(other, **kwargs)
+ else:
+ raise TypeError("The value of other must be a"
+ " DataFrame or Series/dict-like object,"
+ " or list of these")
+ else:
+ raise TypeError("The value of other must be a"
+ " DataFrame or Series/dict-like object,"
+ " or list of these")
+
+ def _append_dict(self, other, *args, **kwargs):
+ return self._append_list_of_dicts([other], *args, **kwargs)
+
+ def _append_series(self, other, *args, **kwargs):
+ return self._append_list_of_series([other], *args, **kwargs)
+
+ def _append_frame(self, other, *args, **kwargs):
+ return self._append_list_of_frames([other], *args, **kwargs)
+
+ def _append_list_of_dicts(self, other, *args, **kwargs):
+ if not kwargs['ignore_index']:
+ raise TypeError('Can only append a dict if ignore_index=True')
+ return self._append_frame(DataFrame(other), *args, **kwargs)
+
+ def _append_list_of_series(self, other, *args, **kwargs):
+ if not kwargs['ignore_index']:
+ if any(series.name is None for series in other):
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
- if other.name is None:
- index = None
- else:
- # other must have the same index name as self, otherwise
- # index name will be reset
- index = Index([other.name], name=self.index.name)
+ if len(other) == 1:
+ # manually create DF for performance
+ ser = other[0]
+ df = DataFrame(ser.values.reshape(1, ser.shape[0]),
+ index=[ser.name], columns=ser.index)
+ else:
+ df = DataFrame(other)
- idx_diff = other.index.difference(self.columns)
- try:
- combined_columns = self.columns.append(idx_diff)
- except TypeError:
- combined_columns = self.columns.astype(object).append(idx_diff)
- other = other.reindex(combined_columns, copy=False)
- other = DataFrame(other.values.reshape((1, len(other))),
- index=index,
- columns=combined_columns)
- other = other._convert(datetime=True, timedelta=True)
- if not self.columns.equals(combined_columns):
- self = self.reindex(columns=combined_columns)
- elif isinstance(other, list) and not isinstance(other[0], DataFrame):
- other = DataFrame(other)
- if (self.columns.get_indexer(other.columns) >= 0).all():
- other = other.loc[:, self.columns]
+ return self._append_frame(df, *args, **kwargs)
+ def _append_list_of_frames(self, other, *args, **kwargs):
+ ignore_index = kwargs['ignore_index']
+ verify_integrity = kwargs['verify_integrity']
+ sort = kwargs['sort']
+ _obj_type = kwargs['_obj_type']
+ _item_type = kwargs.get('_item_type')
+
+ from pandas.core.indexes.api import _normalize_dataframes
from pandas.core.reshape.concat import concat
- if isinstance(other, (list, tuple)):
- to_concat = [self] + other
- else:
- to_concat = [self, other]
- return concat(to_concat, ignore_index=ignore_index,
- verify_integrity=verify_integrity,
- sort=sort)
+
+ # sorting behavior when sort=None
+ # TODO: remove when kwarg value change
+ if sort is None:
+ # stabilish desired behavior
+ if _obj_type in (dict, Series):
+ # dict/ser
+
+ sort = False
+ warn = False
+ elif _item_type in (dict, Series):
+ # [dict]/[ser]
+
+ if (self.columns.get_indexer(other[0].columns) >= 0).all():
+ # self.columns >= other[0].columns
+ sort = False
+ warn = False
+ else:
+ sort = True
+ types = [df.columns.dtype for df in [self] + other]
+ common = find_common_type(types)
+ warn = (common == object)
+ else:
+ # frame/[frame]
+
+ if all(self.columns.equals(df.columns) for df in other):
+ # all values the same
+ sort = False
+ warn = False
+ else:
+ sort = True
+ types = [df.columns.dtype for df in [self] + other]
+ common = find_common_type(types)
+ warn = (common == object)
+
+ # warn if necessary
+ if warn:
+ from pandas.core.indexes.api import _sort_msg
+ warnings.warn(_sort_msg, FutureWarning)
+
+ # The behavior of concat is a bit problematic as it is. To get around,
+ # we prepare the DataFrames before feeding them into concat.
+ to_concat = [self] + other
+ to_concat_norm = _normalize_dataframes(to_concat, sort=sort)
+ result = concat(to_concat_norm, ignore_index=ignore_index,
+ verify_integrity=verify_integrity, sort=sort)
+
+ # preserve base DataFrame indexes names
+ # XXX: how will this work with MultiIndex (?)
+ result.columns.name = self.columns.name
+ if not ignore_index:
+ result.index.name = self.index.name
+
+ # Reindexing the columns created an artificial float64 where it
+ # was not needed. We can convert the columns back to the expected
+ # type.
+ if result.shape[0] == 1:
+ base_frame = next(df for df in to_concat_norm if df.shape[0] == 1)
+ dtypes = base_frame.dtypes.to_dict()
+ result = result.astype(dtypes) # won't work well dups cols
+
+ return result
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py
index e50a4b099a8e1..eb1748a10197d 100644
--- a/pandas/core/indexes/api.py
+++ b/pandas/core/indexes/api.py
@@ -1,11 +1,19 @@
import textwrap
import warnings
-from pandas.core.indexes.base import (Index,
- _new_Index,
- ensure_index,
- ensure_index_from_sequences,
- InvalidIndexError) # noqa
+from pandas.core.dtypes.generic import (
+ ABCCategoricalIndex,
+ ABCIntervalIndex,
+ ABCMultiIndex,
+ ABCPeriodIndex,
+)
+from pandas.core.indexes.base import (
+ Index,
+ _new_Index,
+ ensure_index,
+ ensure_index_from_sequences,
+ InvalidIndexError,
+)
from pandas.core.indexes.category import CategoricalIndex # noqa
from pandas.core.indexes.multi import MultiIndex # noqa
from pandas.core.indexes.interval import IntervalIndex # noqa
@@ -29,6 +37,18 @@
""")
+class _CannotSortError(Exception):
+ pass
+
+
+class _CannotSortDuplicatesError(Exception):
+ pass
+
+
+class _DuplicatesError(Exception):
+ pass
+
+
# TODO: there are many places that rely on these private methods existing in
# pandas.core.index
__all__ = ['Index', 'MultiIndex', 'NumericIndex', 'Float64Index', 'Int64Index',
@@ -160,3 +180,196 @@ def _all_indexes_same(indexes):
if not first.equals(index):
return False
return True
+
+
+def _normalize_dataframes(frame_list, verify_inputs=True, sort=False):
+ """Normalize the columns from a list of DataFrames
+
+ First, an index is created by merging all the original columns. Then,
+ all columns are reindexed to match this new index.
+
+ Parameters
+ ----------
+ index_list: list of Index objects
+ verify_inputs: boolean, default True
+ Verify if the input indexes contain duplicate values. Ignored when all
+ input indexes share the same identity (a is b).
+ sort: boolean, default False
+ Order resulting index. If False, values will come in the order they
+ appear.
+
+ Raises
+ ------
+ InvalidIndexError:
+ When there are duplicates in at least one of the indexes (col)
+ and they are not allowed.
+ TypeError:
+ When sort=True and the resulting index (col) could not be sorted.
+ """
+ orig_columns = [df.columns for df in frame_list]
+
+ try:
+ merged_columns = _merge_index_list(
+ orig_columns,
+ verify_dups=verify_inputs,
+ allow_matching_dups=verify_inputs, # same-id indexes allowed
+ sort=sort
+ )
+ except _DuplicatesError:
+ raise InvalidIndexError("Indexes with duplicates are only allowed"
+ " when they are the same (a is b).")
+ except _CannotSortDuplicatesError:
+ raise InvalidIndexError("When sort=True, indexes with duplicate"
+ " values are not allowed.")
+ except _CannotSortError:
+ raise TypeError("The resulting columns could not be sorted."
+ " You can try setting sort=False or use"
+ " compatible index types.")
+
+ # Because _merge_index_list may infer the index dtype based on values,
+ # we have to provide a workaround to conserve the original dtype.
+ #
+ # Empty indexes come from DataFrames with no columns, and we do not
+ # consider them when calculating the final index dtype.
+ #
+ # XXX: goes against DataFrame.append behavior for empty columns, where we
+ # let them be object dtype.
+ #
+ # What behavior should be adopted?
+ relevant_cols = [i for i in orig_columns
+ if not (len(i) == 0 and i.dtype == 'object')]
+ if relevant_cols:
+ from pandas.core.dtypes.cast import find_common_type
+ types = [i.dtype for i in relevant_cols]
+ common_type = find_common_type(types)
+ merged_columns = merged_columns.astype(common_type)
+
+ return [_reindex(df, merged_columns, axis=1) for df in frame_list]
+
+
+def _merge_index_list(index_list,
+ verify_dups=True,
+ allow_matching_dups=False,
+ sort=False):
+ """Merge a list of indexes into one big index
+
+ Parameters
+ ----------
+ index_list: list of Index objects
+ verify_dups: boolean, default True
+ Verify if the input indexes contain duplicate values.
+ allow_matching_dups: boolean, default False
+ Only relevant when verify_dups=True. Allow duplicate values when all
+ indexes have the same identity.
+ sort: boolean, default False
+ Order result index. If False, values will come in the order they
+ appear.
+
+ Raises
+ ------
+ _CannotSortError
+ When sort=True and the result index is not sortable.
+ _CannotSortDuplicatesError
+ When sort=True and at least one of the inputs contain duplicate
+ values.
+ _DuplicatesError
+ When verify_dups=True and at least one of the input indexes contain
+ duplicate values. This is error is not raised if
+ allow_matching_dups=True and all the indexes have a common identity.
+
+ Notes
+ -----
+ Empty indexes (of object dtype) are forgotten.
+ """
+ # unique index list (a is b)
+ uindex_list = com.get_distinct_objs(index_list)
+ uindex_list = [i for i in uindex_list if not i.is_empty()]
+
+ # verify duplicates
+ if sort or verify_dups:
+ has_dups = any(ix.has_duplicates for ix in uindex_list)
+ if has_dups:
+ if sort:
+ raise _CannotSortDuplicatesError("Cannot sort an index that"
+ " contains duplicate values.")
+ elif verify_dups and not allow_matching_dups:
+ raise _DuplicatesError("Index has duplicate values.")
+ elif verify_dups and allow_matching_dups and len(uindex_list) >= 2:
+ raise _DuplicatesError("Index has duplicate values and does"
+ " not match other indexes.")
+
+ # edge results
+ if len(uindex_list) == 0:
+ return Index([])
+ elif len(uindex_list) == 1:
+ return uindex_list[0]
+
+ # reduce to one result
+ result = uindex_list[0]
+ for idx in uindex_list[1:]:
+ result = _merge_indexes(result, idx)
+
+ # sort
+ return result if not sort else _sort_index(result)
+
+
+def _merge_indexes(index1, index2):
+ """Merge two indexes together
+ """
+
+ # lots of exception handling because we want to allow any
+ # indexes types to be merged together
+
+ try:
+ difference = index2.difference(index1)
+ except (TypeError, ValueError):
+ if isinstance(index2, (ABCIntervalIndex, ABCPeriodIndex)):
+ index2 = index2.astype(object)
+ difference = index2.difference(index1)
+ else:
+ raise
+
+ try:
+ return index1.append(difference)
+ except TypeError:
+ if isinstance(index1, ABCCategoricalIndex):
+ index1 = index1.astype(object)
+ return index1.append(difference)
+ raise
+
+
+def _sort_index(index):
+ """Sort index and raises when not possible
+ """
+ try:
+ return index.sort_values()
+ except TypeError:
+ raise _CannotSortError
+
+
+def _reindex(df, new_index, axis=0):
+ """Reindex df axis to match new_index
+
+ Parameters
+ ----------
+
+ df: a DataFrame object
+ new_index: an Index object
+ axis: int or str, default 0
+
+ Notes
+ -----
+
+ Works the same as DataFrame.reindex, but handles IntervalIndex and
+ MultiIndex errors.
+ """
+ try:
+ return df.reindex(new_index, axis=axis, copy=False)
+ except TypeError:
+ if isinstance(df.columns, ABCIntervalIndex):
+ df.columns = df.columns.astype(object)
+ elif isinstance(df.columns, ABCMultiIndex):
+ df.columns = df.columns.values
+ else:
+ raise
+ return df.reindex(new_index, axis=axis, copy=False)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index b2b6e02e908c5..49a2ede8beb3a 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1541,6 +1541,9 @@ def is_unique(self):
def has_duplicates(self):
return not self.is_unique
+ def is_empty(self):
+ return self.inferred_type in ['empty']
+
def is_boolean(self):
return self.inferred_type in ['boolean']
diff --git a/pandas/tests/reshape/test_append.py b/pandas/tests/reshape/test_append.py
new file mode 100644
index 0000000000000..d2e07ec8d92ff
--- /dev/null
+++ b/pandas/tests/reshape/test_append.py
@@ -0,0 +1,1086 @@
+from itertools import product
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import DataFrame, Index, Series
+from pandas.core.indexes.base import InvalidIndexError
+from pandas.util.testing import assert_frame_equal
+
+
+indexes = [
+ # indexes listed here must be sorted
+
+ # base
+ pd.Index(['A', 'B', 'C']),
+ pd.Index(['A', 'B', 'C'], name='foo'),
+
+ # numeric
+ pd.RangeIndex(3),
+ pd.Int64Index([3, 4, 5]),
+ pd.UInt64Index([6, 7, 8]),
+ pd.Float64Index([3.5, 4.5, 5.5]),
+ pd.Index([9, 10, 11], dtype=object), # fake int64
+
+ # datetime
+ pd.to_datetime(['2013-01-01', '2013-01-10', '2013-01-15']),
+ pd.to_timedelta(['1 day', '2 days', '3 days']),
+ pd.PeriodIndex(start='2000', periods=3),
+
+ # interval
+ pd.interval_range(start=0, end=3),
+
+ # categorical
+ pd.CategoricalIndex('A B C'.split()),
+ pd.CategoricalIndex('D E F'.split(), ordered=True),
+
+ # multi-index
+ pd.MultiIndex.from_arrays(['A B C'.split(), 'D E F'.split()]),
+]
+
+
+indexes_with_dups = [
+ # base
+ pd.Index(['A', 'B', 'B']),
+ pd.Index(['B', 'B', 'A']),
+ pd.Index(['A', 'B', 'B'], name='foo'),
+ pd.Index(['B', 'B', 'A'], name='bar'),
+
+ # numeric
+ pd.Index([9, 10, 10], dtype=object),
+ pd.Int64Index([3, 4, 4]),
+ pd.UInt64Index([6, 7, 7]),
+ pd.Float64Index([3.5, 4.5, 4.5]),
+
+ # datetime
+ pd.to_datetime(['2013-01-01', '2013-01-10', '2013-01-10']),
+ pd.to_timedelta(['1 day', '2 days', '2 days']),
+ pd.PeriodIndex([2000, 2001, 2001], freq='A'),
+
+ # interval
+ pd.IntervalIndex.from_arrays([0, 1, 1], [1, 2, 2]),
+
+ # categorical
+ pd.CategoricalIndex('A B B'.split()),
+ pd.CategoricalIndex('D E E'.split(), ordered=True),
+
+ # multi-index
+ pd.MultiIndex.from_arrays(['A B B'.split(), 'D E E'.split()]),
+]
+
+
+index_sort_groups = [
+ # When indexes from the same group are joined, the result is sortable.
+ # When indexes from different groups are joined, the result is not
+ # sortable.
+
+ [ # joining produces a string index
+ pd.Index(['A', 'B', 'C']),
+ pd.CategoricalIndex('A B C'.split()),
+ pd.CategoricalIndex('D E F'.split(), ordered=True)],
+
+ [ # numeric indexes
+ pd.RangeIndex(3),
+ pd.Int64Index([3, 4, 5]),
+ pd.UInt64Index([6, 7, 8]),
+ pd.Float64Index([3.5, 4.5, 5.5]),
+ pd.Index([9, 10, 11], dtype=object)],
+
+ [pd.to_datetime(['2013-01-01', '2013-01-10', '2013-01-15'])],
+ [pd.to_timedelta(['1 day', '2 days', '3 days'])],
+ [pd.PeriodIndex(start='2000', periods=3)],
+ [pd.interval_range(start=0, end=3)],
+ [pd.MultiIndex.from_arrays(['A B C'.split(), 'D E F'.split()])],
+]
+
+
+def cls_name(obj):
+ return obj.__class__.__name__
+
+
+@pytest.fixture(params=[True, False])
+def sort(request):
+ """Boolean sort keyword for DataFrame.append
+ """
+ return request.param
+
+
+class TestAppendBasic(object):
+ def test_different_types_of_input(self, sort):
+ # There are 7 types of accepted input by append:
+ #
+ # dict
+ # Series
+ # DataFrame
+ # empty list
+ # list of dicts
+ # list of Series
+ # list of DataFrames
+ #
+ # Using one or another should always be interchangeable.
+
+ # append to dict
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ map = {
+ 0: 7,
+ 1: 8,
+ 2: 9
+ }
+ result = df.append(map, ignore_index=True, sort=sort)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ assert_frame_equal(result, expected)
+
+ # append to Series
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ ser = pd.Series([7, 8, 9])
+ result = df.append(ser, ignore_index=True, sort=sort)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ assert_frame_equal(result, expected)
+
+ # append to DataFrame
+ df1 = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ df2 = pd.DataFrame([[7, 8, 9]])
+ result = df1.append(df2, ignore_index=True, sort=sort)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ assert_frame_equal(result, expected)
+
+ # append to empty list
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ result = df1.append([], sort=sort)
+ expected = df
+ assert_frame_equal(result, expected)
+
+ # append to list of dicts
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ map = {
+ 0: 7,
+ 1: 8,
+ 2: 9
+ }
+ result = df.append([map], ignore_index=True, sort=sort)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ assert_frame_equal(result, expected)
+
+ # append to list of Series
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ ser = pd.Series([7, 8, 9])
+ result = df.append([ser], ignore_index=True, sort=sort)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ assert_frame_equal(result, expected)
+
+ # append to list of DataFrames
+ df1 = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ df2 = pd.DataFrame([[7, 8, 9]])
+ result = df1.append([df2], ignore_index=True, sort=sort)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ assert_frame_equal(result, expected)
+
+ # append to list of dicts (2 dicts)
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ map = {
+ 0: 7,
+ 1: 8,
+ 2: 9
+ }
+ result = df.append([map, map], ignore_index=True, sort=sort)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9], [7, 8, 9]])
+ assert_frame_equal(result, expected)
+
+ # append to list of Series (2 series)
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ ser = pd.Series([7, 8, 9])
+ result = df.append([ser, ser], ignore_index=True, sort=sort)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9], [7, 8, 9]])
+ assert_frame_equal(result, expected)
+
+ # append to list of DataFrames (2 dframes)
+ df1 = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ df2 = pd.DataFrame([[7, 8, 9]])
+ result = df1.append([df2, df2], ignore_index=True, sort=sort)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9], [7, 8, 9]])
+ assert_frame_equal(result, expected)
+
+ def test_bad_input_type(self, sort):
+ # When appending a bad input type, the function
+ # should raise an exception.
+
+ bad_input_msg = r'The value of other must be .*'
+ mixed_list_msg = r'When other is a list, its .*'
+
+ # integer input
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ with pytest.raises(TypeError, match=bad_input_msg):
+ df.append(1, ignore_index=True, sort=sort)
+
+ # string input
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ with pytest.raises(TypeError, match=bad_input_msg):
+ df.append("1 2 3", ignore_index=True, sort=sort)
+
+ # tuple input
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ with pytest.raises(TypeError, match=bad_input_msg):
+ df.append((df, ), ignore_index=True, sort=sort)
+
+ # list of integers
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ with pytest.raises(TypeError, match=bad_input_msg):
+ df.append([1], ignore_index=True, sort=sort)
+
+ # list of strings
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ with pytest.raises(TypeError, match=bad_input_msg):
+ df.append(["1 2 3"], ignore_index=True, sort=sort)
+
+ # list of lists
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ with pytest.raises(TypeError, match=bad_input_msg):
+ df.append([[df]], ignore_index=True, sort=sort)
+
+ # list of tuples
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ with pytest.raises(TypeError, match=bad_input_msg):
+ df.append([(df, )], ignore_index=True, sort=sort)
+
+ # mixed list
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ ser = pd.Series([7, 8, 9])
+ dict = {
+ 0: 10,
+ 1: 11,
+ 2: 12
+ }
+ with pytest.raises(TypeError, match=mixed_list_msg):
+ df.append([ser, dict], ignore_index=True, sort=sort)
+ with pytest.raises(TypeError, match=mixed_list_msg):
+ df.append([dict, ser], ignore_index=True, sort=sort)
+
+ # mixed list with bad first element
+ # (when the first element is bad, display the
+ # bad input msg instead of the mixed list one)
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ ser = pd.Series([7, 8, 9])
+ with pytest.raises(TypeError, match=bad_input_msg):
+ df.append([1, ser, ser], ignore_index=True, sort=sort)
+
+ # mixed list with bad second element
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ ser = pd.Series([7, 8, 9])
+ with pytest.raises(TypeError, match=mixed_list_msg):
+ df.append([ser, 1, ser], ignore_index=True, sort=sort)
+
+ # mixed list with bad third element
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ ser = pd.Series([7, 8, 9])
+ with pytest.raises(TypeError, match=mixed_list_msg):
+ df.append([ser, ser, 1], ignore_index=True, sort=sort)
+
+ def test_no_unecessary_upcast(self, sort):
+ # GH: 22621
+        # When appending, the result columns should not be
+        # upcast to float64 unless necessary.
+
+ # basic
+ df1 = pd.DataFrame([[1, 2, 3]])
+ df2 = pd.DataFrame([[4, 5, 6]], index=[1])
+ result = df1.append(df2, sort=sort)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ assert_frame_equal(result, expected)
+
+ # 0 rows 0 columns
+ df1 = pd.DataFrame([[1, 2, 3]])
+ df2 = pd.DataFrame()
+ result = df1.append(df2, sort=sort)
+ expected = df1.copy()
+ assert_frame_equal(result, expected)
+
+ df1 = pd.DataFrame()
+ df2 = pd.DataFrame([[1, 2, 3]])
+ result = df1.append(df2, sort=sort)
+ expected = df2.copy()
+ assert_frame_equal(result, expected)
+
+ # 0 rows 2 columns
+ df1 = pd.DataFrame([[1, 2, 3]], columns=[0, 1, 2])
+ df2 = pd.DataFrame(columns=[3, 4])
+ result = df1.append(df2, sort=sort)
+ expected = pd.DataFrame([[1, 2, 3, np.nan, np.nan]])
+ assert_frame_equal(result, expected)
+
+ df1 = pd.DataFrame(columns=[0, 1])
+ df2 = pd.DataFrame([[1, 2, 3]], columns=[2, 3, 4])
+ result = df1.append(df2, sort=sort)
+ expected = pd.DataFrame([[np.nan, np.nan, 1, 2, 3]])
+ assert_frame_equal(result, expected)
+
+ # big.append(small)
+ big = pd.DataFrame([[1, 2, 3]])
+ small = pd.DataFrame([[4, 5]], index=[1])
+ result = big.append(small, sort=sort)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, np.nan]])
+ assert_frame_equal(result, expected)
+
+ # small.append(big)
+ small = pd.DataFrame([[1, 2]])
+ big = pd.DataFrame([[3, 4, 5]], index=[1])
+ result = small.append(big, sort=sort)
+ expected = pd.DataFrame([[1, 2, np.nan], [3, 4, 5]])
+ assert_frame_equal(result, expected)
+
+
+class TestAppendSortNone(object):
+ """Regression tests to preserve the behavior of sort=None
+ """
+
+ def generate_frames(self, compare, special):
+ if compare == 'lt':
+ if special:
+ df1 = DataFrame([[11, 12]], columns=[2, 1])
+ df2 = DataFrame([[13, 14, 15]], columns=[3, 2, 1])
+ else:
+ df1 = DataFrame([[11, 12]], columns=list('ba'))
+ df2 = DataFrame([[13, 14, 15]], columns=list('cba'))
+ elif compare == 'eq':
+ if special:
+ df1 = DataFrame([[11, 12, 13]], columns=[3, 2, 1])
+ df2 = DataFrame([[14, 15, 16]], columns=[3, 2, 1])
+ else:
+ df1 = DataFrame([[11, 12, 13]], columns=list('cba'))
+ df2 = DataFrame([[14, 15, 16]], columns=list('cba'))
+ elif compare == 'gt':
+ if special:
+ df1 = DataFrame([[11, 12, 13]], columns=[3, 2, 1])
+ df2 = DataFrame([[14, 15]], columns=[2, 1])
+ else:
+ df1 = DataFrame([[11, 12, 13]], columns=list('cba'))
+ df2 = DataFrame([[14, 15]], columns=list('ba'))
+ elif compare == 'dups':
+ # special category for duplicates
+ # assumes compare = 'eq'
+ if special:
+ df1 = DataFrame([[11, 12, 13]], columns=[3, 3, 1])
+ df2 = DataFrame([[14, 15, 16]], columns=[3, 3, 1])
+ else:
+ df1 = DataFrame([[11, 12, 13]], columns=list('cca'))
+ df2 = DataFrame([[14, 15, 16]], columns=list('cca'))
+
+ # avoid upcasting problems
+ df1 = df1.astype('float64')
+ df2 = df2.astype('float64')
+
+ return df1, df2
+
+ def merge_indexes(self, idx1, idx2, sort):
+ len1 = idx1.size
+ len2 = idx2.size
+
+ if len1 < len2:
+ # match 'lt' in self.generate_frames
+ vals1 = idx1.tolist()
+ vals2 = [idx2.tolist()[0]]
+ result = Index(vals1 + vals2)
+ else:
+ result = idx1.copy()
+
+ return result.sort_values() if sort else result
+
+ def merge_frames(self, df1, df2, sort):
+ new_index = self.merge_indexes(df1.columns, df2.columns, sort)
+ df1 = df1.reindex(new_index, axis=1)
+ df2 = df2.reindex(new_index, axis=1)
+
+ values = np.vstack([df1.values[0, :], df2.values[0, :]])
+ result = DataFrame(values, columns=new_index)
+ return result
+
+ @pytest.mark.parametrize('input_type', ['series', 'dict'])
+ @pytest.mark.parametrize('special', [True, False])
+ @pytest.mark.parametrize('compare', ['lt', 'eq', 'gt', 'dups'])
+ def test_append_series_dict(self, compare, special, input_type):
+ # When appending a Series or dict, the resulting columns come unsorted
+ # and no warning is raised.
+
+ sorts = False
+ warns = False
+
+ df1, df2 = self.generate_frames(compare, special)
+ if input_type == 'series':
+ other = df2.loc[0]
+ else:
+ other = df2.loc[0].to_dict()
+ if compare == 'dups':
+ return
+
+ ctx = pytest.warns(FutureWarning) if warns else pytest.warns(None)
+ expected = self.merge_frames(df1, df2, sorts)
+ with ctx:
+ result = df1.append(other, ignore_index=True, sort=None)
+ assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize('input_type', ['[series]', '[dict]'])
+ @pytest.mark.parametrize('special', [True, False])
+ @pytest.mark.parametrize('compare', ['lt', 'eq', 'gt']) # dups won't work
+ def test_append_list_of_series_dict(self, compare, special, input_type):
+ # When appending a list of Series or list of dicts, the behavior is
+ # as specified below.
+
+ if compare in ('gt', 'eq'):
+ sorts = False
+ warns = False
+ else:
+ sorts = True
+ warns = not special
+
+ df1, df2 = self.generate_frames(compare, special)
+ if input_type == '[series]':
+ other = [df2.loc[0]]
+ else:
+ other = [df2.loc[0].to_dict()]
+
+ ctx = pytest.warns(FutureWarning) if warns else pytest.warns(None)
+ expected = self.merge_frames(df1, df2, sorts)
+ with ctx:
+ result = df1.append(other, ignore_index=True, sort=None)
+ assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize('input_type', ['dataframe', '[dataframe]'])
+ @pytest.mark.parametrize('special', [True, False])
+ @pytest.mark.parametrize('compare', ['lt', 'eq', 'gt', 'dups'])
+ def test_append_dframe_list_of_dframe(self, compare, special, input_type):
+        # When appending a DataFrame or list of DataFrames, the
+        # behavior is as specified below.
+
+ if compare in ('dups', 'eq'):
+ sorts = False
+ warns = False
+ else:
+ sorts = True
+ warns = not special
+
+ df1, df2 = self.generate_frames(compare, special)
+ if input_type == 'dataframe':
+ other = df2
+ else:
+ other = [df2]
+
+ ctx = pytest.warns(FutureWarning) if warns else pytest.warns(None)
+ expected = self.merge_frames(df1, df2, sorts)
+ with ctx:
+ result = df1.append(other, ignore_index=True, sort=None)
+ assert_frame_equal(result, expected)
+
+
+class TestAppendColumnsIndex(object):
+ @pytest.mark.parametrize('idx_name3', [None, 'foo', 'bar', 'baz'])
+ @pytest.mark.parametrize('idx_name2', [None, 'foo', 'bar', 'baz'])
+ @pytest.mark.parametrize('idx_name1', [None, 'foo', 'bar', 'baz'])
+ def test_preserve_index_name(self, sort, idx_name1, idx_name2, idx_name3):
+ # When appending, the name of the indexes
+ # of the base DataFrame must always be
+ # preserved in the result.
+
+ df1 = pd.DataFrame([[1, 2, 3]])
+ df2 = pd.DataFrame([[4, 5, 6]], index=[1])
+ df3 = pd.DataFrame([[7, 8, 9]], index=[2])
+
+ df1.columns.name = idx_name1
+ df2.columns.name = idx_name2
+ df3.columns.name = idx_name3
+
+ # append []
+ result = df1.append([], sort=sort)
+ expected = df1.copy()
+ assert_frame_equal(result, expected)
+
+ # append [df]
+ result = df1.append([df2], sort=sort)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ expected.columns.name = idx_name1
+ assert_frame_equal(result, expected)
+
+ # append [df, df]
+ result = df1.append([df2, df3], sort=sort)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ expected.columns.name = idx_name1
+ assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize('index', indexes, ids=cls_name)
+ def test_preserve_index_type(self, sort, index):
+ # when there's only one index type in the inputs,
+ # it must be preserved in the output.
+
+ # basic
+ df1 = pd.DataFrame([[1, 2, 3]], columns=index)
+ df2 = pd.DataFrame([[4, 5, 6]], index=[1], columns=index)
+ result = df1.append(df2, sort=sort)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=index)
+ assert_frame_equal(result, expected)
+
+ # big.append(small)
+ big = pd.DataFrame([[1, 2, 3]], columns=index)
+ small = pd.DataFrame([[4, 5]], index=[1], columns=index[:2])
+ result = big.append(small, sort=sort)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, np.nan]], columns=index)
+ assert_frame_equal(result, expected)
+
+ # small.append(big)
+ small = pd.DataFrame([[1, 2]], columns=index[:2])
+ big = pd.DataFrame([[3, 4, 5]], index=[1], columns=index)
+ result = small.append(big, sort=sort)
+ expected = pd.DataFrame([[1, 2, np.nan], [3, 4, 5]], columns=index)
+ assert_frame_equal(result, expected)
+
+ def test_ignore_empty_index_dtype(self, sort):
+ # When one of the indexes is empty and of object dtype, it should be
+ # ignored in the result (as empty).
+
+ df1 = pd.DataFrame()
+ df2 = pd.DataFrame([[11, 12, 13]], columns=[1, 2, 3])
+
+ result1 = df1.append(df2, sort=sort)
+ result2 = df2.append(df1, sort=sort)
+
+ expected = df2.copy()
+ assert_frame_equal(result1, expected)
+ assert_frame_equal(result2, expected)
+
+ def test_account_empty_index_dtype(self, sort):
+ # When one of the indexes is empty and of dtype different from object,
+ # it should not be ignored when calculating the result dtype.
+
+ df1 = pd.DataFrame(columns=pd.Float64Index([]))
+ df2 = pd.DataFrame([[11, 12, 13]], columns=[1, 2, 3])
+
+ result1 = df1.append(df2, sort=sort)
+ result2 = df2.append(df1, sort=sort)
+
+ expected = df2.copy()
+ expected.columns = [1.0, 2.0, 3.0]
+ assert_frame_equal(result1, expected)
+ assert_frame_equal(result2, expected)
+
+ @pytest.mark.parametrize('index2', indexes, ids=cls_name)
+ @pytest.mark.parametrize('index1', indexes, ids=cls_name)
+ def test_preserve_index_values_without_sort(self, index1, index2):
+        # When appending indexes of different types, we want the
+        # resulting index to preserve the exact index values.
+
+ # Related to GH13626
+ from pandas.core.dtypes.generic import (
+ ABCDatetimeIndex, ABCMultiIndex, ABCTimedeltaIndex
+ )
+ if isinstance(index1, ABCMultiIndex):
+ if isinstance(index2, ABCDatetimeIndex):
+ pytest.xfail("MultiIndex + DatetimeIndex produces bad value")
+ if isinstance(index2, ABCTimedeltaIndex):
+ pytest.xfail("MultiIndex + TimedeltaIndex produces bad value")
+
+ df1 = pd.DataFrame([[1, 2, 3]], columns=index1)
+ df2 = pd.DataFrame([[4, 5, 6]], columns=index2, index=[1])
+ result = df1.append(df2, sort=False)
+ for value in index1:
+ assert value in result.columns
+ for value in index2:
+ assert value in result.columns
+
+ @pytest.mark.parametrize(
+ 'index1, index2',
+ [(i1, i2)
+ for group in index_sort_groups
+ for i1, i2 in product(group, repeat=2)],
+ ids=cls_name
+ )
+ def test_preserve_index_values_with_sort(self, index1, index2):
+ # When appending indexes of different types, we want
+ # the resulting index to preserve the exact indexes
+ # values.
+
+ df1 = pd.DataFrame([[1, 2, 3]], columns=index1)
+ df2 = pd.DataFrame([[4, 5, 6]], columns=index2, index=[1])
+ result = df1.append(df2, sort=True)
+ for value in index1:
+ assert value in result.columns
+ for value in index2:
+ assert value in result.columns
+
+ @pytest.mark.parametrize('col_index', indexes_with_dups, ids=cls_name)
+ def test_good_duplicates_without_sort(self, col_index):
+ # When all indexes have the same identity (a is b), duplicates should
+ # be allowed and append works.
+
+ df1 = pd.DataFrame([[1, 2, 3]], columns=col_index)
+ df2 = pd.DataFrame([[4, 5, 6]], columns=col_index)
+
+ # df1.append([])
+ result = df1.append([], sort=False)
+ expected = df1.copy()
+ assert_frame_equal(result, expected)
+
+ # df1.append([df2])
+ result = df1.append([df2], ignore_index=True, sort=False)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ expected.columns = col_index
+ assert_frame_equal(result, expected)
+
+ # df1.append([df2, df2])
+ result = df1.append([df2, df2], ignore_index=True, sort=False)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6], [4, 5, 6]])
+ expected.columns = col_index
+ assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize('col_index', indexes_with_dups, ids=cls_name)
+ def test_bad_duplicates_without_sort(self, col_index):
+ # When the indexes do not share a common identity, duplicates are not
+ # allowed and append raises.
+
+ df1 = pd.DataFrame([[1, 2, 3]], columns=col_index)
+ df2 = pd.DataFrame([[4, 5, 6]], columns=col_index)
+ df3 = pd.DataFrame([[7, 8, 9]], columns=col_index.copy()) # different
+ ctx = pytest.raises(InvalidIndexError,
+ match=r'Indexes with duplicates.*a is b.*')
+ with ctx:
+ result = df1.append([df3], sort=False)
+ with ctx:
+ result = df1.append([df2, df3], sort=False)
+ with ctx:
+ result = df1.append([df3, df2], sort=False)
+ with ctx:
+ result = df1.append([df3, df3], sort=False)
+
+ @pytest.mark.parametrize('col_index', indexes_with_dups, ids=cls_name)
+ def test_duplicates_with_sort(self, col_index):
+        # When sort=True, indexes with duplicate values are not allowed.
+
+ df1 = pd.DataFrame([[1, 2, 3]], columns=col_index)
+ df2 = pd.DataFrame([[4, 5, 6]], columns=col_index.copy())
+ ctx = pytest.raises(InvalidIndexError,
+ match=r'When sort=True, indexes with dupl.*')
+
+ with ctx:
+ result = df1.append([], sort=True)
+ with ctx:
+ result = df1.append([df1], sort=True)
+ with ctx:
+ result = df1.append([df2], sort=True)
+ with ctx:
+ result = df1.append([df1, df1], sort=True)
+ with ctx:
+ result = df1.append([df1, df2], sort=True)
+ with ctx:
+ result = df1.append([df2, df1], sort=True)
+ with ctx:
+ result = df1.append([df2, df2], sort=True)
+
+ def test_nosort_basic(self):
+ # When sort=False, the resulting columns come
+ # in the order that they appear in the inputs.
+
+ nan = np.nan
+
+ # NUMERIC INDEX TESTS
+
+ # append []
+ df = pd.DataFrame([[1, 2, 3]], columns=[0, 1, 2])
+ result = df.append([], sort=False)
+ expected = df[[0, 1, 2]]
+ assert_frame_equal(result, expected)
+
+ df = pd.DataFrame([[1, 2, 3]], columns=[2, 1, 0])
+ result = df.append([], sort=False)
+ expected = df[[2, 1, 0]]
+ assert_frame_equal(result, expected)
+
+ # append [df]
+ df1 = pd.DataFrame([[1, 2]], columns=[0.0, 1.0])
+ df2 = pd.DataFrame([[1, 2]], columns=[0.5, 1.5], index=[1])
+ result = df1.append(df2, sort=False)
+ expected = pd.DataFrame([[1, 2, nan, nan],
+ [nan, nan, 1, 2]],
+ columns=[0.0, 1.0, 0.5, 1.5])
+ assert_frame_equal(result, expected)
+
+ # append [df, df]
+ df1 = pd.DataFrame([[1, 2]], columns=[0.0, 1.0])
+ df2 = pd.DataFrame([[1, 2]], columns=[0.3, 1.3], index=[1])
+ df3 = pd.DataFrame([[1, 2]], columns=[0.6, 1.6], index=[2])
+ result = df1.append([df2, df3], sort=False)
+ expected = pd.DataFrame([[1, 2, nan, nan, nan, nan],
+ [nan, nan, 1, 2, nan, nan],
+ [nan, nan, nan, nan, 1, 2]],
+ columns=[0.0, 1.0, 0.3, 1.3, 0.6, 1.6])
+ assert_frame_equal(result, expected)
+
+ # STRING INDEX TESTS
+
+ # append []
+ df = pd.DataFrame([[1, 2, 3]], columns=['a', 'b', 'c'])
+ result = df.append([], sort=False)
+ expected = df[['a', 'b', 'c']]
+ assert_frame_equal(result, expected)
+
+ df = pd.DataFrame([[1, 2, 3]], columns=['c', 'b', 'a'])
+ result = df.append([], sort=False)
+ expected = df[['c', 'b', 'a']]
+ assert_frame_equal(result, expected)
+
+ # append [df]
+ df1 = pd.DataFrame([[1, 2]], columns=['a', 'c'])
+ df2 = pd.DataFrame([[1, 2]], columns=['b', 'd'], index=[1])
+ result = df1.append(df2, sort=False)
+ expected = pd.DataFrame([[1, 2, nan, nan],
+ [nan, nan, 1, 2]],
+ columns=['a', 'c', 'b', 'd'])
+ assert_frame_equal(result, expected)
+
+ # append [df, df]
+ df1 = pd.DataFrame([[1, 2]], columns=['a', 'd'])
+ df2 = pd.DataFrame([[1, 2]], columns=['b', 'e'], index=[1])
+ df3 = pd.DataFrame([[1, 2]], columns=['c', 'f'], index=[2])
+ result = df1.append([df2, df3], sort=False)
+ expected = pd.DataFrame([[1, 2, nan, nan, nan, nan],
+ [nan, nan, 1, 2, nan, nan],
+ [nan, nan, nan, nan, 1, 2]],
+ columns=['a', 'd', 'b', 'e', 'c', 'f'])
+ assert_frame_equal(result, expected)
+
+ def test_sort_basic(self):
+ # When sort=True, the resulting columns must come
+ # out sorted.
+
+ nan = np.nan
+
+ # NUMERIC INDEX TESTS
+
+ # append []
+ df = pd.DataFrame([[1, 2, 3]], columns=[0, 1, 2])
+ result = df.append([], sort=True)
+ expected = df[[0, 1, 2]]
+ assert_frame_equal(result, expected)
+
+ df = pd.DataFrame([[1, 2, 3]], columns=[2, 1, 0])
+ result = df.append([], sort=True)
+ expected = df[[0, 1, 2]]
+ assert_frame_equal(result, expected)
+
+ # append [df]
+ df1 = pd.DataFrame([[1, 2]], columns=[0.0, 1.0])
+ df2 = pd.DataFrame([[1, 2]], columns=[0.5, 1.5], index=[1])
+ result = df1.append(df2, sort=True)
+ expected = pd.DataFrame([[1, nan, 2, nan],
+ [nan, 1, nan, 2]],
+ columns=[0.0, 0.5, 1.0, 1.5])
+ assert_frame_equal(result, expected)
+
+ # append [df, df]
+ df1 = pd.DataFrame([[1, 2]], columns=[0.0, 1.0])
+ df2 = pd.DataFrame([[1, 2]], columns=[0.3, 1.3], index=[1])
+ df3 = pd.DataFrame([[1, 2]], columns=[0.6, 1.6], index=[2])
+ result = df1.append([df2, df3], sort=True)
+ expected = pd.DataFrame([[1, nan, nan, 2, nan, nan],
+ [nan, 1, nan, nan, 2, nan],
+ [nan, nan, 1, nan, nan, 2]],
+ columns=[0.0, 0.3, 0.6, 1.0, 1.3, 1.6])
+ assert_frame_equal(result, expected)
+
+ # STRING INDEX TESTS
+
+ # append []
+ df = pd.DataFrame([[1, 2, 3]], columns=['a', 'b', 'c'])
+ result = df.append([], sort=True)
+ expected = df[['a', 'b', 'c']]
+ assert_frame_equal(result, expected)
+
+ df = pd.DataFrame([[1, 2, 3]], columns=['c', 'b', 'a'])
+ result = df.append([], sort=True)
+ expected = df[['a', 'b', 'c']]
+ assert_frame_equal(result, expected)
+
+ # append [df]
+ df1 = pd.DataFrame([[1, 2]], columns=['a', 'c'])
+ df2 = pd.DataFrame([[1, 2]], columns=['b', 'd'], index=[1])
+ result = df1.append(df2, sort=True)
+ expected = pd.DataFrame([[1, nan, 2, nan],
+ [nan, 1, nan, 2]],
+ columns=['a', 'b', 'c', 'd'])
+ assert_frame_equal(result, expected)
+
+ # append [df, df]
+ df1 = pd.DataFrame([[1, 2]], columns=['a', 'd'])
+ df2 = pd.DataFrame([[1, 2]], columns=['b', 'e'], index=[1])
+ df3 = pd.DataFrame([[1, 2]], columns=['c', 'f'], index=[2])
+ result = df1.append([df2, df3], sort=True)
+ expected = pd.DataFrame([[1, nan, nan, 2, nan, nan],
+ [nan, 1, nan, nan, 2, nan],
+ [nan, nan, 1, nan, nan, 2]],
+ columns=['a', 'b', 'c', 'd', 'e', 'f'])
+ assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize('index2', indexes, ids=cls_name)
+ @pytest.mark.parametrize('index1', indexes, ids=cls_name)
+ def test_index_types_without_sort(self, index1, index2):
+ # We should be able to append to a DataFrame
+ # regardless of the type of its index.
+
+ # TODO: check end of append and create tests (empty / IntervalIndex)
+ # TODO: implement different way for df.append([])
+ from pandas.core.dtypes.generic import ABCIntervalIndex
+ if isinstance(index1, ABCIntervalIndex):
+ pytest.xfail("Cannot do df[interval] for IntervalIndex")
+
+ # the code below should not raise any exceptions
+ df1 = pd.DataFrame([[1, 2, 3]], columns=index1)
+ df2 = pd.DataFrame([[4, 5, 6]], columns=index2, index=[1])
+ df1.append([], sort=False)
+ df1.append([df2], sort=False)
+ df1.append([df2, df2], sort=False)
+
+ @pytest.mark.parametrize(
+ 'index1, index2',
+ [(i1, i2)
+ for group in index_sort_groups
+ for i1, i2 in product(group, repeat=2)],
+ ids=cls_name
+ )
+ def test_index_types_with_possible_sort(self, index1, index2):
+ # When the result of joining two indexes is sortable,
+ # we should not raise any exceptions.
+
+ # TODO: check end of append and create tests (empty / IntervalIndex)
+ # TODO: implement different way for df.append([])
+ from pandas.core.dtypes.generic import ABCIntervalIndex
+ if isinstance(index1, ABCIntervalIndex):
+ pytest.xfail("Cannot do df[interval] for IntervalIndex")
+
+ df1 = pd.DataFrame([[1, 2, 3]], columns=index1)
+ df2 = pd.DataFrame([[4, 5, 6]], columns=index2, index=[1])
+ df1.append([], sort=True) # sorts the original frame
+ df1.append([df2], sort=True)
+ df1.append([df2, df2], sort=True)
+
+ @pytest.mark.parametrize(
+ 'index1, index2',
+ [(i1, i2)
+ for g1, g2 in product(index_sort_groups, repeat=2)
+ # different sort groups
+ if type(g1[0]) != type(g2[0])
+ for i1, i2 in product(g1, g2)],
+ ids=cls_name
+ )
+ def test_index_types_with_impossible_sort(self, index1, index2):
+ # When the result of joining two indexes is not sortable,
+ # we should raise an exception.
+
+ # TODO: check end of append and create tests (empty / IntervalIndex)
+ # TODO: implement different way for df.append([])
+ from pandas.core.dtypes.generic import ABCIntervalIndex
+ if isinstance(index1, ABCIntervalIndex):
+ pytest.xfail("Cannot do df[interval] for IntervalIndex")
+
+ err_msg = r'The resulting columns could not be sorted\..*'
+
+ df1 = pd.DataFrame([[1, 2, 3]], columns=index1)
+ df2 = pd.DataFrame([[4, 5, 6]], columns=index2, index=[1])
+
+ with pytest.raises(TypeError, match=err_msg):
+ df1.append([df2], sort=True)
+ with pytest.raises(TypeError, match=err_msg):
+ df1.append([df2, df2], sort=True)
+
+
+class TestAppendRowsIndex(object):
+ @pytest.mark.parametrize('idx_name3', [None, 'foo', 'bar', 'baz'])
+ @pytest.mark.parametrize('idx_name2', [None, 'foo', 'bar', 'baz'])
+ @pytest.mark.parametrize('idx_name1', [None, 'foo', 'bar', 'baz'])
+ def test_preserve_index_name(self, sort, idx_name1, idx_name2, idx_name3):
+ # When appending, the name of the indexes
+ # of the base DataFrame must always be
+ # preserved in the result.
+
+ df1 = pd.DataFrame([[1, 2, 3]])
+ df2 = pd.DataFrame([[4, 5, 6]], index=[1])
+ df3 = pd.DataFrame([[7, 8, 9]], index=[2])
+
+ df1.index.name = idx_name1
+ df2.index.name = idx_name2
+ df3.index.name = idx_name3
+
+ # append []
+ result = df1.append([], sort=sort)
+ expected = df1.copy()
+ assert_frame_equal(result, expected)
+
+ # append [df]
+ result = df1.append([df2], sort=sort)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ expected.index.name = idx_name1
+ assert_frame_equal(result, expected)
+
+ # append [df, df]
+ result = df1.append([df2, df3], sort=sort)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ expected.index.name = idx_name1
+ assert_frame_equal(result, expected)
+
+ def test_ignore_empty_index_dtype(self, sort):
+ # When one of the indexes is empty and of object dtype, it should be
+ # ignored in the result (as empty).
+
+ df1 = pd.DataFrame()
+ df2 = pd.DataFrame([[11], [12], [13]], index=[1, 2, 3])
+
+ result1 = df1.append(df2, sort=sort)
+ result2 = df2.append(df1, sort=sort)
+
+ expected = df2.copy()
+ assert_frame_equal(result1, expected)
+ assert_frame_equal(result2, expected)
+
+ def test_account_empty_index_dtype(self, sort):
+ # When one of the indexes is empty and of dtype different from object,
+ # it should not be ignored when calculating the result dtype.
+
+ df1 = pd.DataFrame(index=pd.Float64Index([]))
+ df2 = pd.DataFrame([[11], [12], [13]], index=[1, 2, 3])
+
+ result1 = df1.append(df2, sort=sort)
+ result2 = df2.append(df1, sort=sort)
+
+ expected = df2.copy()
+ expected.index = [1.0, 2.0, 3.0]
+ assert_frame_equal(result1, expected)
+ assert_frame_equal(result2, expected)
+
+ @pytest.mark.parametrize('index', indexes, ids=cls_name)
+ def test_preserve_index_type(self, sort, index):
+ # when there's only one index type in the inputs,
+ # it must be preserved in the output.
+
+ index1 = index[:1]
+ index2 = index[1:2]
+ index_comb = index1.append(index2)
+
+ df1 = pd.DataFrame([[1, 2, 3]], index=index1)
+ df2 = pd.DataFrame([[4, 5, 6]], index=index2)
+ result = df1.append(df2, sort=sort)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6]], index=index_comb)
+ assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize('index2', indexes, ids=cls_name)
+ @pytest.mark.parametrize('index1', indexes, ids=cls_name)
+ def test_preserve_index_values(self, sort, index1, index2):
+ # When appending indexes of different types, we want
+ # the resulting index to preserve the exact indexes
+ # values.
+
+ # Related to GH13626
+ from pandas.core.dtypes.generic import (
+ ABCDatetimeIndex, ABCMultiIndex, ABCTimedeltaIndex
+ )
+ if isinstance(index1, ABCMultiIndex):
+ if isinstance(index2, ABCDatetimeIndex):
+ pytest.xfail("MultiIndex + DatetimeIndex produces bad value")
+ if isinstance(index2, ABCTimedeltaIndex):
+ pytest.xfail("MultiIndex + TimedeltaIndex produces bad value")
+
+ # Concat raises a TypeError when appending a CategoricalIndex
+ # with another type
+ from pandas.core.dtypes.generic import ABCCategoricalIndex
+ if isinstance(index1, ABCCategoricalIndex):
+ pytest.xfail("Cannot have a CategoricalIndex append to another typ")
+
+ df1 = pd.DataFrame([[1, 2, 3]], index=index1[:1])
+ df2 = pd.DataFrame([[4, 5, 6]], index=index2[:1])
+ result = df1.append(df2, sort=sort)
+ assert index1[0] in result.index
+ assert index2[0] in result.index
+
+ def test_duplicates_without_verify_integrity(self):
+ # When verify_integrity=False, the function should
+ # allow duplicate values in the rows index.
+
+ raise NotImplementedError
+
+ def test_duplicates_with_verify_integrity(self):
+ # When verify_integrity=True, the function should
+ # not allow duplicate values in the rows index (whether
+ # in the input or output).
+
+ raise NotImplementedError
+
+ def test_ignore_index(self):
+ # When ignore_index=True, the function should completely
+ # ignore the input indexes and generate one that is brand
+ # new (RangeIndex).
+
+ raise NotImplementedError
+
+ def test_warning_ignore_index_and_verify_integrity(self):
+ # It makes no sense to set verify_integrity=True when
+ # ignore_index=True. To warn of a possible user
+ # misunderstanding, append should raise a warning in
+ # this situation.
+
+ raise NotImplementedError
+
+
+class TestAppendDangling(object):
+ """Tests that have not been concretized yet
+ """
+
+ def test_append_unnamed_series_raises(self, sort):
+ dict_msg = 'Can only append a dict if ignore_index=True'
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ dict = {
+ 0: 7,
+ 1: 8,
+ 2: 9
+ }
+ with pytest.raises(TypeError, match=dict_msg):
+ df.append(dict, sort=sort)
+ with pytest.raises(TypeError, match=dict_msg):
+ df.append([dict], sort=sort)
+ with pytest.raises(TypeError, match=dict_msg):
+ df.append([dict, dict], sort=sort)
+
+ series_msg = 'Can only append a Series if ignore_index=True or .*'
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ series = pd.Series([7, 8, 9])
+ with pytest.raises(TypeError, match=series_msg):
+ df.append(series, sort=sort)
+ with pytest.raises(TypeError, match=series_msg):
+ df.append([series], sort=sort)
+ with pytest.raises(TypeError, match=series_msg):
+ df.append([series, series], sort=sort)
+
+ indexes = [
+ None,
+ pd.Index([0, 1]),
+ pd.Index(['a', 'b']),
+ pd.Index(['a', 'b'], name='foo')
+ ]
+
+ @pytest.mark.parametrize('index1', indexes, ids=lambda x: repr(x))
+ @pytest.mark.parametrize('index2', indexes, ids=lambda x: repr(x))
+ def test_append_ignore_index(self, sort, index1, index2):
+ # when appending with ignore_index=True,
+ # all index content must be forgotten
+ df1 = pd.DataFrame([[1, 2, 3], [4, 5, 6]], index=index1)
+ df2 = pd.DataFrame([[1, 2, 3], [4, 5, 6]], index=index2)
+
+ result = df1.append(df2, ignore_index=True)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6],
+ [1, 2, 3], [4, 5, 6]])
+ assert_frame_equal(result, expected)
+
+ result = df1.append([df2], ignore_index=True)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6],
+ [1, 2, 3], [4, 5, 6]])
+ assert_frame_equal(result, expected)
+
+ result = df1.append([df2, df2], ignore_index=True)
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6],
+ [1, 2, 3], [4, 5, 6],
+ [1, 2, 3], [4, 5, 6]])
+ assert_frame_equal(result, expected)
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index 762b04cc3bd4f..b9a5caeb4a2e7 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -904,7 +904,7 @@ def test_append_same_columns_type(self, index):
ser_index = index[:2]
ser = pd.Series([7, 8], index=ser_index, name=2)
result = df.append(ser)
- expected = pd.DataFrame([[1., 2., 3.], [4, 5, 6], [7, 8, np.nan]],
+ expected = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, np.nan]],
index=[0, 1, 2],
columns=index)
assert_frame_equal(result, expected)
@@ -958,13 +958,13 @@ def test_append_different_columns_types_raises(
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=index_can_append)
ser = pd.Series([7, 8, 9], index=index_cannot_append_with_other,
name=2)
- with pytest.raises(TypeError):
+ with pytest.raises((AttributeError, ValueError, TypeError)):
df.append(ser)
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]],
columns=index_cannot_append_with_other)
ser = pd.Series([7, 8, 9], index=index_can_append, name=2)
- with pytest.raises(TypeError):
+ with pytest.raises((AttributeError, ValueError, TypeError)):
df.append(ser)
def test_append_dtype_coerce(self, sort):
This is a WIP pull request; there are still a bunch of things to do. I'm posting it here so you can give me early feedback or point me in a direction if something is not looking good.
*PR will be split into multiple smaller ones, as suggested*
### How it is
DataFrame.append method is pretty confusing as it is right now. There are multiple code paths according to different input types (dict/Series/DataFrame or list of these) and the results are undefined in lots of edge cases.
### Objective
This PR aims to make the code a bit cleaner and define the behavior for `append` better. For example, the input type should not interfere with the result (i.e. whether `other` is a DataFrame, Series, etc.)
Also, I wanted to make changes to the behavior of `concat` easier. `append` delegates its heavy lifting to `concat` and, the way it was before, it was hard to understand what was expected from `concat`. Now it's a bit easier.
### Behavior changes
(there might be some things I forgot)
- Accepted inputs for `other` are now exactly `dict`, `Series`, `DataFrame` or a list of any of those. Empty lists are also accepted. Wrong input types produce a `TypeError`
- The behavior is now exactly the same for equivalent inputs, regardless of input type.
- Empty columns and empty indexes will not be used when calculating the result dtypes. This is something I will think a bit about still, if you have any early suggestions, it will help.
- DataFrames containing any types of columns can be appended now. In contrast, `IntervalIndex`, `PeriodIndex` and `MultiIndex` raised errors before.
- `sort=False` will never sort the resulting columns. `append` used to sort them in some cases.
- ~~`sort=True` will raise a TypeError when the resulting columns cannot be sorted~~ (it may be better to just issue a RuntimeWarning)
- An `InvalidIndexError` will be raised if there are duplicates in the columns indexes and all indexes ~~are not the same (id)~~ do not share the same values (will raise an issue to discuss this)
### Task list (not in order)
- [x] Implement sort=None + regression tests
- [ ] Behavior when sort is not possible (warn (?))
- [ ] Move towards deprecating sort=None
- [x] Behavior for empty indexes
- [ ] Some test cases
- [ ] Check/Improve performance
- [ ] Beautiful PR
- [ ] Write Docs
- [ ] What's new
- [ ] Related issues | https://api.github.com/repos/pandas-dev/pandas/pulls/22915 | 2018-10-01T00:24:13Z | 2018-11-23T03:35:01Z | null | 2018-11-23T03:35:01Z |
CLN: Flake8 E741 | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 986fe347898f5..c2d2e447d1616 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1,3 +1,5 @@
+# pylint: disable=E1101
+# pylint: disable=W0212,W0703,W0622
"""
DataFrame
---------
@@ -9,11 +11,9 @@
labeling information
"""
from __future__ import division
-# pylint: disable=E1101,E1103
-# pylint: disable=W0212,W0231,W0703,W0622
-import functools
import collections
+import functools
import itertools
import sys
import warnings
@@ -22,7 +22,20 @@
import numpy as np
import numpy.ma as ma
-from pandas.core.accessor import CachedAccessor
+from pandas._libs import lib, algos as libalgos
+
+from pandas.util._decorators import (Appender, Substitution,
+ rewrite_axis_style_signature,
+ deprecate_kwarg)
+from pandas.util._validators import (validate_bool_kwarg,
+ validate_axis_style_args)
+
+from pandas import compat
+from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u,
+ OrderedDict, PY36, raise_with_traceback,
+ string_and_binary_types)
+from pandas.compat.numpy import function as nv
+
from pandas.core.dtypes.cast import (
maybe_upcast,
cast_scalar_to_array,
@@ -62,46 +75,32 @@
from pandas.core.dtypes.concat import _get_sliced_frame_result_type
from pandas.core.dtypes.missing import isna, notna
-
+from pandas.core import algorithms
+from pandas.core import common as com
+from pandas.core import nanops
+from pandas.core import ops
+from pandas.core.accessor import CachedAccessor
+from pandas.core.arrays import Categorical, ExtensionArray
+from pandas.core.config import get_option
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, ensure_index,
ensure_index_from_sequences)
+from pandas.core.indexes import base as ibase
+from pandas.core.indexes.datetimes import DatetimeIndex
+from pandas.core.indexes.period import PeriodIndex
+from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable,
check_bool_indexer)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.series import Series
-from pandas.core.arrays import Categorical, ExtensionArray
-import pandas.core.algorithms as algorithms
-from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u,
- OrderedDict, raise_with_traceback,
- string_and_binary_types)
-from pandas import compat
-from pandas.compat import PY36
-from pandas.compat.numpy import function as nv
-from pandas.util._decorators import (Appender, Substitution,
- rewrite_axis_style_signature,
- deprecate_kwarg)
-from pandas.util._validators import (validate_bool_kwarg,
- validate_axis_style_args)
-
-from pandas.core.indexes.period import PeriodIndex
-from pandas.core.indexes.datetimes import DatetimeIndex
-from pandas.core.indexes.timedeltas import TimedeltaIndex
-import pandas.core.indexes.base as ibase
-import pandas.core.common as com
-import pandas.core.nanops as nanops
-import pandas.core.ops as ops
-import pandas.io.formats.console as console
-import pandas.io.formats.format as fmt
+from pandas.io.formats import console
+from pandas.io.formats import format as fmt
from pandas.io.formats.printing import pprint_thing
-import pandas.plotting._core as gfx
-
-from pandas._libs import lib, algos as libalgos
-from pandas.core.config import get_option
+import pandas.plotting._core as gfx
# ---------------------------------------------------------------------
# Docstring templates
@@ -1003,7 +1002,7 @@ def dot(self, other):
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError('Dot product shape mismatch, '
- '{l} vs {r}'.format(l=lvals.shape,
+ '{s} vs {r}'.format(s=lvals.shape,
r=rvals.shape))
if isinstance(other, DataFrame):
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index fd8e17c369f5a..a0c3243ddbc3c 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -1,9 +1,16 @@
-from sys import getsizeof
import operator
from datetime import timedelta
+from sys import getsizeof
import numpy as np
+
+from pandas import compat
+
from pandas._libs import index as libindex
+from pandas.util._decorators import Appender, cache_readonly
+
+from pandas.compat import lrange, range, get_range_parameters
+from pandas.compat.numpy import function as nv
from pandas.core.dtypes.common import (
is_integer,
@@ -11,18 +18,12 @@
is_timedelta64_dtype,
is_int64_dtype)
from pandas.core.dtypes.generic import ABCSeries, ABCTimedeltaIndex
-
-from pandas import compat
-from pandas.compat import lrange, range, get_range_parameters
-from pandas.compat.numpy import function as nv
+from pandas.core.dtypes import concat as _concat
import pandas.core.common as com
+import pandas.core.indexes.base as ibase
from pandas.core import ops
from pandas.core.indexes.base import Index, _index_shared_docs
-from pandas.util._decorators import Appender, cache_readonly
-import pandas.core.dtypes.concat as _concat
-import pandas.core.indexes.base as ibase
-
from pandas.core.indexes.numeric import Int64Index
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 150518aadcfd9..b3c913f21dd86 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1,10 +1,16 @@
# pylint: disable=W0223
import textwrap
import warnings
+
import numpy as np
-from pandas.compat import range, zip
+from pandas._libs.indexing import _NDFrameIndexerBase
+from pandas.util._decorators import Appender
+
+from pandas.errors import AbstractMethodError
+
import pandas.compat as compat
-from pandas.core.dtypes.generic import ABCDataFrame, ABCPanel, ABCSeries
+from pandas.compat import range, zip
+
from pandas.core.dtypes.common import (
is_integer_dtype,
is_integer, is_float,
@@ -14,14 +20,11 @@
is_scalar,
is_sparse,
ensure_platform_int)
+from pandas.core.dtypes.generic import ABCDataFrame, ABCPanel, ABCSeries
from pandas.core.dtypes.missing import isna, _infer_fill_value
-from pandas.errors import AbstractMethodError
-from pandas.util._decorators import Appender
-
-from pandas.core.index import Index, MultiIndex
import pandas.core.common as com
-from pandas._libs.indexing import _NDFrameIndexerBase
+from pandas.core.index import Index, MultiIndex
# the supported indexers
@@ -304,8 +307,7 @@ def _setitem_with_indexer(self, indexer, value):
self._has_valid_setitem_indexer(indexer)
# also has the side effect of consolidating in-place
- # TODO: Panel, DataFrame are not imported, remove?
- from pandas import Panel, DataFrame, Series # noqa
+ from pandas import Series
info_axis = self.obj._info_axis_number
# maybe partial set
@@ -553,14 +555,14 @@ def can_do_equal_len():
is_scalar(plane_indexer[0])):
return False
- l = len(value)
item = labels[0]
index = self.obj[item].index
+ values_len = len(value)
# equal len list/ndarray
- if len(index) == l:
+ if len(index) == values_len:
return True
- elif lplane_indexer == l:
+ elif lplane_indexer == values_len:
return True
return False
@@ -717,8 +719,8 @@ def ravel(i):
# single indexer
if len(indexer) > 1 and not multiindex_indexer:
- l = len(indexer[1])
- ser = np.tile(ser, l).reshape(l, -1).T
+ len_indexer = len(indexer[1])
+ ser = np.tile(ser, len_indexer).reshape(len_indexer, -1).T
return ser
@@ -2077,9 +2079,9 @@ def _validate_key(self, key, axis):
elif is_list_like_indexer(key):
# check that the key does not exceed the maximum size of the index
arr = np.array(key)
- l = len(self.obj._get_axis(axis))
+ len_axis = len(self.obj._get_axis(axis))
- if len(arr) and (arr.max() >= l or arr.min() < -l):
+ if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis):
raise IndexError("positional indexers are out-of-bounds")
else:
raise ValueError("Can only index by location with "
@@ -2136,9 +2138,8 @@ def _validate_integer(self, key, axis):
If 'key' is not a valid position in axis 'axis'
"""
- ax = self.obj._get_axis(axis)
- l = len(ax)
- if key >= l or key < -l:
+ len_axis = len(self.obj._get_axis(axis))
+ if key >= len_axis or key < -len_axis:
raise IndexError("single positional indexer is out-of-bounds")
def _getitem_tuple(self, tup):
@@ -2425,18 +2426,18 @@ def length_of_indexer(indexer, target=None):
"""return the length of a single non-tuple indexer which could be a slice
"""
if target is not None and isinstance(indexer, slice):
- l = len(target)
+ target_len = len(target)
start = indexer.start
stop = indexer.stop
step = indexer.step
if start is None:
start = 0
elif start < 0:
- start += l
- if stop is None or stop > l:
- stop = l
+ start += target_len
+ if stop is None or stop > target_len:
+ stop = target_len
elif stop < 0:
- stop += l
+ stop += target_len
if step is None:
step = 1
elif step < 0:
diff --git a/pandas/io/packers.py b/pandas/io/packers.py
index 7a1e72637f4ce..e27a1a623adb5 100644
--- a/pandas/io/packers.py
+++ b/pandas/io/packers.py
@@ -38,39 +38,41 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
-from datetime import datetime, date, timedelta
-from dateutil.parser import parse
import os
-from textwrap import dedent
import warnings
+from datetime import datetime, date, timedelta
+from textwrap import dedent
import numpy as np
+from dateutil.parser import parse
+
from pandas import compat
+from pandas import (Timestamp, Period, Series, DataFrame, # noqa:F401
+ Index, MultiIndex, Float64Index, Int64Index,
+ Panel, RangeIndex, PeriodIndex, DatetimeIndex, NaT,
+ Categorical, CategoricalIndex, IntervalIndex, Interval,
+ TimedeltaIndex)
+
+from pandas.util._move import (
+ BadMove as _BadMove,
+ move_into_mutable_buffer as _move_into_mutable_buffer,
+)
+from pandas.errors import PerformanceWarning
+
from pandas.compat import u, u_safe
from pandas.core.dtypes.common import (
is_categorical_dtype, is_object_dtype,
needs_i8_conversion, pandas_dtype)
-from pandas import (Timestamp, Period, Series, DataFrame, # noqa
- Index, MultiIndex, Float64Index, Int64Index,
- Panel, RangeIndex, PeriodIndex, DatetimeIndex, NaT,
- Categorical, CategoricalIndex, IntervalIndex, Interval,
- TimedeltaIndex)
+from pandas.core import internals
from pandas.core.arrays import IntervalArray
+from pandas.core.generic import NDFrame
+from pandas.core.internals import BlockManager, make_block, _safe_reshape
from pandas.core.sparse.api import SparseSeries, SparseDataFrame
from pandas.core.sparse.array import BlockIndex, IntIndex
-from pandas.core.generic import NDFrame
-from pandas.errors import PerformanceWarning
from pandas.io.common import get_filepath_or_buffer, _stringify_path
-from pandas.core.internals import BlockManager, make_block, _safe_reshape
-import pandas.core.internals as internals
-
from pandas.io.msgpack import Unpacker as _Unpacker, Packer as _Packer, ExtType
-from pandas.util._move import (
- BadMove as _BadMove,
- move_into_mutable_buffer as _move_into_mutable_buffer,
-)
# check which compression libs we have installed
try:
@@ -187,16 +189,16 @@ def read_msgpack(path_or_buf, encoding='utf-8', iterator=False, **kwargs):
return Iterator(path_or_buf)
def read(fh):
- l = list(unpack(fh, encoding=encoding, **kwargs))
- if len(l) == 1:
- return l[0]
+ unpacked_obj = list(unpack(fh, encoding=encoding, **kwargs))
+ if len(unpacked_obj) == 1:
+ return unpacked_obj[0]
if should_close:
try:
path_or_buf.close()
except: # noqa: flake8
pass
- return l
+ return unpacked_obj
# see if we have an actual file
if isinstance(path_or_buf, compat.string_types):
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index fc9e415ed38f7..ff37036533b4f 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1,26 +1,31 @@
+# pylint: disable-msg=E1101,W0613,W0603
"""
High level interface to PyTables for reading and writing pandas data structures
to disk
"""
-# pylint: disable-msg=E1101,W0613,W0603
-from datetime import datetime, date
-import time
-import re
import copy
import itertools
-import warnings
import os
+import re
+import time
+import warnings
+
+from datetime import datetime, date
from distutils.version import LooseVersion
import numpy as np
+from pandas import (Series, DataFrame, Panel, Index,
+ MultiIndex, Int64Index, isna, concat, to_datetime,
+ SparseSeries, SparseDataFrame, PeriodIndex,
+ DatetimeIndex, TimedeltaIndex)
+from pandas import compat
from pandas._libs import algos, lib, writers as libwriters
from pandas._libs.tslibs import timezones
from pandas.errors import PerformanceWarning
-from pandas import compat
-from pandas.compat import u_safe as u, PY3, range, lrange, string_types, filter
+from pandas.compat import PY3, range, lrange, string_types, filter
from pandas.core.dtypes.common import (
is_list_like,
@@ -34,27 +39,22 @@
from pandas.core.dtypes.missing import array_equivalent
from pandas.core import config
-from pandas.core.config import get_option
-from pandas.core.sparse.array import BlockIndex, IntIndex
-from pandas.core.base import StringMixin
import pandas.core.common as com
from pandas.core.algorithms import match, unique
from pandas.core.arrays.categorical import (Categorical,
_factorize_from_iterables)
+from pandas.core.base import StringMixin
+from pandas.core.computation.pytables import Expr, maybe_expression
+from pandas.core.config import get_option
+from pandas.core.index import ensure_index
from pandas.core.internals import (BlockManager, make_block,
_block2d_to_blocknd,
_factor_indexer, _block_shape)
-from pandas.core.index import ensure_index
-from pandas.core.computation.pytables import Expr, maybe_expression
+from pandas.core.sparse.array import BlockIndex, IntIndex
from pandas.io.common import _stringify_path
from pandas.io.formats.printing import adjoin, pprint_thing
-from pandas import (Series, DataFrame, Panel, Index,
- MultiIndex, Int64Index, isna, concat, to_datetime,
- SparseSeries, SparseDataFrame, PeriodIndex,
- DatetimeIndex, TimedeltaIndex)
-
# versioning attribute
_version = '0.15.2'
@@ -161,10 +161,10 @@ class DuplicateWarning(Warning):
# formats
_FORMAT_MAP = {
- u('f'): 'fixed',
- u('fixed'): 'fixed',
- u('t'): 'table',
- u('table'): 'table',
+ u'f': 'fixed',
+ u'fixed': 'fixed',
+ u't': 'table',
+ u'table': 'table',
}
format_deprecate_doc = """
@@ -179,36 +179,36 @@ class DuplicateWarning(Warning):
# map object types
_TYPE_MAP = {
- Series: u('series'),
- SparseSeries: u('sparse_series'),
- DataFrame: u('frame'),
- SparseDataFrame: u('sparse_frame'),
- Panel: u('wide'),
+ Series: u'series',
+ SparseSeries: u'sparse_series',
+ DataFrame: u'frame',
+ SparseDataFrame: u'sparse_frame',
+ Panel: u'wide',
}
# storer class map
_STORER_MAP = {
- u('Series'): 'LegacySeriesFixed',
- u('DataFrame'): 'LegacyFrameFixed',
- u('DataMatrix'): 'LegacyFrameFixed',
- u('series'): 'SeriesFixed',
- u('sparse_series'): 'SparseSeriesFixed',
- u('frame'): 'FrameFixed',
- u('sparse_frame'): 'SparseFrameFixed',
- u('wide'): 'PanelFixed',
+ u'Series': 'LegacySeriesFixed',
+ u'DataFrame': 'LegacyFrameFixed',
+ u'DataMatrix': 'LegacyFrameFixed',
+ u'series': 'SeriesFixed',
+ u'sparse_series': 'SparseSeriesFixed',
+ u'frame': 'FrameFixed',
+ u'sparse_frame': 'SparseFrameFixed',
+ u'wide': 'PanelFixed',
}
# table class map
_TABLE_MAP = {
- u('generic_table'): 'GenericTable',
- u('appendable_series'): 'AppendableSeriesTable',
- u('appendable_multiseries'): 'AppendableMultiSeriesTable',
- u('appendable_frame'): 'AppendableFrameTable',
- u('appendable_multiframe'): 'AppendableMultiFrameTable',
- u('appendable_panel'): 'AppendablePanelTable',
- u('worm'): 'WORMTable',
- u('legacy_frame'): 'LegacyFrameTable',
- u('legacy_panel'): 'LegacyPanelTable',
+ u'generic_table': 'GenericTable',
+ u'appendable_series': 'AppendableSeriesTable',
+ u'appendable_multiseries': 'AppendableMultiSeriesTable',
+ u'appendable_frame': 'AppendableFrameTable',
+ u'appendable_multiframe': 'AppendableMultiFrameTable',
+ u'appendable_panel': 'AppendablePanelTable',
+ u'worm': 'WORMTable',
+ u'legacy_frame': 'LegacyFrameTable',
+ u'legacy_panel': 'LegacyPanelTable',
}
# axes map
@@ -1104,7 +1104,7 @@ def groups(self):
(getattr(g._v_attrs, 'pandas_type', None) or
getattr(g, 'table', None) or
(isinstance(g, _table_mod.table.Table) and
- g._v_name != u('table'))))
+ g._v_name != u'table')))
]
def walk(self, where="/"):
@@ -1297,8 +1297,8 @@ def error(t):
_tables()
if (getattr(group, 'table', None) or
isinstance(group, _table_mod.table.Table)):
- pt = u('frame_table')
- tt = u('generic_table')
+ pt = u'frame_table'
+ tt = u'generic_table'
else:
raise TypeError(
"cannot create a storer if the object is not existing "
@@ -1312,10 +1312,10 @@ def error(t):
# we are actually a table
if format == 'table':
- pt += u('_table')
+ pt += u'_table'
# a storer node
- if u('table') not in pt:
+ if u'table' not in pt:
try:
return globals()[_STORER_MAP[pt]](self, group, **kwargs)
except KeyError:
@@ -1327,33 +1327,33 @@ def error(t):
# if we are a writer, determine the tt
if value is not None:
- if pt == u('series_table'):
+ if pt == u'series_table':
index = getattr(value, 'index', None)
if index is not None:
if index.nlevels == 1:
- tt = u('appendable_series')
+ tt = u'appendable_series'
elif index.nlevels > 1:
- tt = u('appendable_multiseries')
- elif pt == u('frame_table'):
+ tt = u'appendable_multiseries'
+ elif pt == u'frame_table':
index = getattr(value, 'index', None)
if index is not None:
if index.nlevels == 1:
- tt = u('appendable_frame')
+ tt = u'appendable_frame'
elif index.nlevels > 1:
- tt = u('appendable_multiframe')
- elif pt == u('wide_table'):
- tt = u('appendable_panel')
- elif pt == u('ndim_table'):
- tt = u('appendable_ndim')
+ tt = u'appendable_multiframe'
+ elif pt == u'wide_table':
+ tt = u'appendable_panel'
+ elif pt == u'ndim_table':
+ tt = u'appendable_ndim'
else:
# distiguish between a frame/table
- tt = u('legacy_panel')
+ tt = u'legacy_panel'
try:
fields = group.table._v_attrs.fields
- if len(fields) == 1 and fields[0] == u('value'):
- tt = u('legacy_frame')
+ if len(fields) == 1 and fields[0] == u'value':
+ tt = u'legacy_frame'
except IndexError:
pass
@@ -1699,7 +1699,7 @@ def maybe_set_size(self, min_itemsize=None, **kwargs):
""" maybe set a string col itemsize:
min_itemsize can be an integer or a dict with this columns name
with an integer size """
- if _ensure_decoded(self.kind) == u('string'):
+ if _ensure_decoded(self.kind) == u'string':
if isinstance(min_itemsize, dict):
min_itemsize = min_itemsize.get(self.name)
@@ -1726,7 +1726,7 @@ def validate_col(self, itemsize=None):
""" validate this column: return the compared against itemsize """
# validate this column for string truncation (or reset to the max size)
- if _ensure_decoded(self.kind) == u('string'):
+ if _ensure_decoded(self.kind) == u'string':
c = self.col
if c is not None:
if itemsize is None:
@@ -1881,9 +1881,9 @@ def __init__(self, values=None, kind=None, typ=None,
super(DataCol, self).__init__(values=values, kind=kind, typ=typ,
cname=cname, **kwargs)
self.dtype = None
- self.dtype_attr = u("%s_dtype" % self.name)
+ self.dtype_attr = u'{}_dtype'.format(self.name)
self.meta = meta
- self.meta_attr = u("%s_meta" % self.name)
+ self.meta_attr = u'{}_meta'.format(self.name)
self.set_data(data)
self.set_metadata(metadata)
@@ -1929,19 +1929,19 @@ def set_kind(self):
if self.dtype is not None:
dtype = _ensure_decoded(self.dtype)
- if dtype.startswith(u('string')) or dtype.startswith(u('bytes')):
+ if dtype.startswith(u'string') or dtype.startswith(u'bytes'):
self.kind = 'string'
- elif dtype.startswith(u('float')):
+ elif dtype.startswith(u'float'):
self.kind = 'float'
- elif dtype.startswith(u('complex')):
+ elif dtype.startswith(u'complex'):
self.kind = 'complex'
- elif dtype.startswith(u('int')) or dtype.startswith(u('uint')):
+ elif dtype.startswith(u'int') or dtype.startswith(u'uint'):
self.kind = 'integer'
- elif dtype.startswith(u('date')):
+ elif dtype.startswith(u'date'):
self.kind = 'datetime'
- elif dtype.startswith(u('timedelta')):
+ elif dtype.startswith(u'timedelta'):
self.kind = 'timedelta'
- elif dtype.startswith(u('bool')):
+ elif dtype.startswith(u'bool'):
self.kind = 'bool'
else:
raise AssertionError(
@@ -2184,14 +2184,14 @@ def convert(self, values, nan_rep, encoding, errors):
dtype = _ensure_decoded(self.dtype)
# reverse converts
- if dtype == u('datetime64'):
+ if dtype == u'datetime64':
# recreate with tz if indicated
self.data = _set_tz(self.data, self.tz, coerce=True)
- elif dtype == u('timedelta64'):
+ elif dtype == u'timedelta64':
self.data = np.asarray(self.data, dtype='m8[ns]')
- elif dtype == u('date'):
+ elif dtype == u'date':
try:
self.data = np.asarray(
[date.fromordinal(v) for v in self.data], dtype=object)
@@ -2199,12 +2199,12 @@ def convert(self, values, nan_rep, encoding, errors):
self.data = np.asarray(
[date.fromtimestamp(v) for v in self.data],
dtype=object)
- elif dtype == u('datetime'):
+ elif dtype == u'datetime':
self.data = np.asarray(
[datetime.fromtimestamp(v) for v in self.data],
dtype=object)
- elif meta == u('category'):
+ elif meta == u'category':
# we have a categorical
categories = self.metadata
@@ -2237,7 +2237,7 @@ def convert(self, values, nan_rep, encoding, errors):
self.data = self.data.astype('O', copy=False)
# convert nans / decode
- if _ensure_decoded(self.kind) == u('string'):
+ if _ensure_decoded(self.kind) == u'string':
self.data = _unconvert_string_array(
self.data, nan_rep=nan_rep, encoding=encoding, errors=errors)
@@ -2547,12 +2547,12 @@ def read_array(self, key, start=None, stop=None):
else:
ret = node[start:stop]
- if dtype == u('datetime64'):
+ if dtype == u'datetime64':
# reconstruct a timezone if indicated
ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True)
- elif dtype == u('timedelta64'):
+ elif dtype == u'timedelta64':
ret = np.asarray(ret, dtype='m8[ns]')
if transposed:
@@ -2563,13 +2563,13 @@ def read_array(self, key, start=None, stop=None):
def read_index(self, key, **kwargs):
variety = _ensure_decoded(getattr(self.attrs, '%s_variety' % key))
- if variety == u('multi'):
+ if variety == u'multi':
return self.read_multi_index(key, **kwargs)
- elif variety == u('block'):
+ elif variety == u'block':
return self.read_block_index(key, **kwargs)
- elif variety == u('sparseint'):
+ elif variety == u'sparseint':
return self.read_sparse_intindex(key, **kwargs)
- elif variety == u('regular'):
+ elif variety == u'regular':
_, index = self.read_index_node(getattr(self.group, key), **kwargs)
return index
else: # pragma: no cover
@@ -2686,13 +2686,13 @@ def read_index_node(self, node, start=None, stop=None):
factory = self._get_index_factory(index_class)
kwargs = {}
- if u('freq') in node._v_attrs:
+ if u'freq' in node._v_attrs:
kwargs['freq'] = node._v_attrs['freq']
- if u('tz') in node._v_attrs:
+ if u'tz' in node._v_attrs:
kwargs['tz'] = node._v_attrs['tz']
- if kind in (u('date'), u('datetime')):
+ if kind in (u'date', u'datetime'):
index = factory(_unconvert_index(data, kind,
encoding=self.encoding,
errors=self.errors),
@@ -2837,7 +2837,7 @@ def read(self, **kwargs):
class SeriesFixed(GenericFixed):
- pandas_kind = u('series')
+ pandas_kind = u'series'
attributes = ['name']
@property
@@ -2874,7 +2874,7 @@ def validate_read(self, kwargs):
class SparseSeriesFixed(SparseFixed):
- pandas_kind = u('sparse_series')
+ pandas_kind = u'sparse_series'
attributes = ['name', 'fill_value', 'kind']
def read(self, **kwargs):
@@ -2883,7 +2883,7 @@ def read(self, **kwargs):
sp_values = self.read_array('sp_values')
sp_index = self.read_index('sp_index')
return SparseSeries(sp_values, index=index, sparse_index=sp_index,
- kind=self.kind or u('block'),
+ kind=self.kind or u'block',
fill_value=self.fill_value,
name=self.name)
@@ -2898,7 +2898,7 @@ def write(self, obj, **kwargs):
class SparseFrameFixed(SparseFixed):
- pandas_kind = u('sparse_frame')
+ pandas_kind = u'sparse_frame'
attributes = ['default_kind', 'default_fill_value']
def read(self, **kwargs):
@@ -3015,12 +3015,12 @@ def write(self, obj, **kwargs):
class FrameFixed(BlockManagerFixed):
- pandas_kind = u('frame')
+ pandas_kind = u'frame'
obj_type = DataFrame
class PanelFixed(BlockManagerFixed):
- pandas_kind = u('wide')
+ pandas_kind = u'wide'
obj_type = Panel
is_shape_reversed = True
@@ -3054,7 +3054,7 @@ class Table(Fixed):
metadata : the names of the metadata columns
"""
- pandas_kind = u('wide_table')
+ pandas_kind = u'wide_table'
table_type = None
levels = 1
is_table = True
@@ -3158,7 +3158,7 @@ def nrows_expected(self):
@property
def is_exists(self):
""" has this table been created """
- return u('table') in self.group
+ return u'table' in self.group
@property
def storable(self):
@@ -3837,7 +3837,7 @@ class WORMTable(Table):
table. writing is a one-time operation the data are stored in a format
that allows for searching the data on disk
"""
- table_type = u('worm')
+ table_type = u'worm'
def read(self, **kwargs):
""" read the indices and the indexing array, calculate offset rows and
@@ -3865,7 +3865,7 @@ class LegacyTable(Table):
IndexCol(name='column', axis=2, pos=1, index_kind='columns_kind'),
DataCol(name='fields', cname='values', kind_attr='fields', pos=2)
]
- table_type = u('legacy')
+ table_type = u'legacy'
ndim = 3
def write(self, **kwargs):
@@ -3962,8 +3962,8 @@ def read(self, where=None, columns=None, **kwargs):
class LegacyFrameTable(LegacyTable):
""" support the legacy frame table """
- pandas_kind = u('frame_table')
- table_type = u('legacy_frame')
+ pandas_kind = u'frame_table'
+ table_type = u'legacy_frame'
obj_type = Panel
def read(self, *args, **kwargs):
@@ -3973,7 +3973,7 @@ def read(self, *args, **kwargs):
class LegacyPanelTable(LegacyTable):
""" support the legacy panel table """
- table_type = u('legacy_panel')
+ table_type = u'legacy_panel'
obj_type = Panel
@@ -3981,7 +3981,7 @@ class AppendableTable(LegacyTable):
""" suppor the new appendable table formats """
_indexables = None
- table_type = u('appendable')
+ table_type = u'appendable'
def write(self, obj, axes=None, append=False, complib=None,
complevel=None, fletcher32=None, min_itemsize=None,
@@ -4172,13 +4172,13 @@ def delete(self, where=None, start=None, stop=None, **kwargs):
values = self.selection.select_coords()
# delete the rows in reverse order
- l = Series(values).sort_values()
- ln = len(l)
+ sorted_series = Series(values).sort_values()
+ ln = len(sorted_series)
if ln:
# construct groups of consecutive rows
- diff = l.diff()
+ diff = sorted_series.diff()
groups = list(diff[diff > 1].index)
# 1 group
@@ -4196,7 +4196,7 @@ def delete(self, where=None, start=None, stop=None, **kwargs):
# we must remove in reverse order!
pg = groups.pop()
for g in reversed(groups):
- rows = l.take(lrange(g, pg))
+ rows = sorted_series.take(lrange(g, pg))
table.remove_rows(start=rows[rows.index[0]
], stop=rows[rows.index[-1]] + 1)
pg = g
@@ -4210,8 +4210,8 @@ def delete(self, where=None, start=None, stop=None, **kwargs):
class AppendableFrameTable(AppendableTable):
""" suppor the new appendable table formats """
- pandas_kind = u('frame_table')
- table_type = u('appendable_frame')
+ pandas_kind = u'frame_table'
+ table_type = u'appendable_frame'
ndim = 2
obj_type = DataFrame
@@ -4276,8 +4276,8 @@ def read(self, where=None, columns=None, **kwargs):
class AppendableSeriesTable(AppendableFrameTable):
""" support the new appendable table formats """
- pandas_kind = u('series_table')
- table_type = u('appendable_series')
+ pandas_kind = u'series_table'
+ table_type = u'appendable_series'
ndim = 2
obj_type = Series
storage_obj_type = DataFrame
@@ -4319,8 +4319,8 @@ def read(self, columns=None, **kwargs):
class AppendableMultiSeriesTable(AppendableSeriesTable):
""" support the new appendable table formats """
- pandas_kind = u('series_table')
- table_type = u('appendable_multiseries')
+ pandas_kind = u'series_table'
+ table_type = u'appendable_multiseries'
def write(self, obj, **kwargs):
""" we are going to write this as a frame table """
@@ -4334,8 +4334,8 @@ def write(self, obj, **kwargs):
class GenericTable(AppendableFrameTable):
""" a table that read/writes the generic pytables table format """
- pandas_kind = u('frame_table')
- table_type = u('generic_table')
+ pandas_kind = u'frame_table'
+ table_type = u'generic_table'
ndim = 2
obj_type = DataFrame
@@ -4384,14 +4384,14 @@ def write(self, **kwargs):
class AppendableMultiFrameTable(AppendableFrameTable):
""" a frame with a multi-index """
- table_type = u('appendable_multiframe')
+ table_type = u'appendable_multiframe'
obj_type = DataFrame
ndim = 2
_re_levels = re.compile(r"^level_\d+$")
@property
def table_type_short(self):
- return u('appendable_multi')
+ return u'appendable_multi'
def write(self, obj, data_columns=None, **kwargs):
if data_columns is None:
@@ -4421,7 +4421,7 @@ def read(self, **kwargs):
class AppendablePanelTable(AppendableTable):
""" suppor the new appendable table formats """
- table_type = u('appendable_panel')
+ table_type = u'appendable_panel'
ndim = 3
obj_type = Panel
@@ -4592,26 +4592,26 @@ def _convert_index(index, encoding=None, errors='strict', format_type=None):
def _unconvert_index(data, kind, encoding=None, errors='strict'):
kind = _ensure_decoded(kind)
- if kind == u('datetime64'):
+ if kind == u'datetime64':
index = DatetimeIndex(data)
- elif kind == u('timedelta64'):
+ elif kind == u'timedelta64':
index = TimedeltaIndex(data)
- elif kind == u('datetime'):
+ elif kind == u'datetime':
index = np.asarray([datetime.fromtimestamp(v) for v in data],
dtype=object)
- elif kind == u('date'):
+ elif kind == u'date':
try:
index = np.asarray(
[date.fromordinal(v) for v in data], dtype=object)
except (ValueError):
index = np.asarray(
[date.fromtimestamp(v) for v in data], dtype=object)
- elif kind in (u('integer'), u('float')):
+ elif kind in (u'integer', u'float'):
index = np.asarray(data)
- elif kind in (u('string')):
+ elif kind in (u'string'):
index = _unconvert_string_array(data, nan_rep=None, encoding=encoding,
errors=errors)
- elif kind == u('object'):
+ elif kind == u'object':
index = np.asarray(data[0])
else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
@@ -4621,11 +4621,11 @@ def _unconvert_index(data, kind, encoding=None, errors='strict'):
def _unconvert_index_legacy(data, kind, legacy=False, encoding=None,
errors='strict'):
kind = _ensure_decoded(kind)
- if kind == u('datetime'):
+ if kind == u'datetime':
index = to_datetime(data)
- elif kind in (u('integer')):
+ elif kind in (u'integer'):
index = np.asarray(data, dtype=object)
- elif kind in (u('string')):
+ elif kind in (u'string'):
index = _unconvert_string_array(data, nan_rep=None, encoding=encoding,
errors=errors)
else: # pragma: no cover
@@ -4730,7 +4730,7 @@ def _get_converter(kind, encoding, errors):
def _need_convert(kind):
kind = _ensure_decoded(kind)
- if kind in (u('datetime'), u('datetime64'), u('string')):
+ if kind in (u'datetime', u'datetime64', u'string'):
return True
return False
| - [x] towards #22122 ( test files still remaining for E741)
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Import optimiser, makes things alphabetical and also removes unused eg, `Float64Index`, in `packers.py`. Do people prefer this or not? | https://api.github.com/repos/pandas-dev/pandas/pulls/22913 | 2018-09-30T19:20:07Z | 2018-10-09T15:43:40Z | 2018-10-09T15:43:40Z | 2018-10-09T19:34:29Z |
API/DEPR: 'periods' argument instead of 'n' for PeriodIndex.shift() | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index a1a0857fe6365..62a2a358d3ba9 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -637,7 +637,7 @@ Deprecations
- :meth:`Series.str.cat` has deprecated using arbitrary list-likes *within* list-likes. A list-like container may still contain
many ``Series``, ``Index`` or 1-dimensional ``np.ndarray``, or alternatively, only scalar values. (:issue:`21950`)
- :meth:`FrozenNDArray.searchsorted` has deprecated the ``v`` parameter in favor of ``value`` (:issue:`14645`)
-- :func:`DatetimeIndex.shift` now accepts ``periods`` argument instead of ``n`` for consistency with :func:`Index.shift` and :func:`Series.shift`. Using ``n`` throws a deprecation warning (:issue:`22458`)
+- :func:`DatetimeIndex.shift` and :func:`PeriodIndex.shift` now accept ``periods`` argument instead of ``n`` for consistency with :func:`Index.shift` and :func:`Series.shift`. Using ``n`` throws a deprecation warning (:issue:`22458`, :issue:`22912`)
.. _whatsnew_0240.prior_deprecations:
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 1ce60510c6a69..e4ace2bfe1509 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -552,6 +552,7 @@ def shift(self, periods, freq=None):
See Also
--------
Index.shift : Shift values of Index.
+ PeriodIndex.shift : Shift values of PeriodIndex.
"""
return self._time_shift(periods=periods, freq=freq)
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 92803ab5f52e0..96a18a628fcf9 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -14,7 +14,7 @@
from pandas._libs.tslibs.fields import isleapyear_arr
from pandas import compat
-from pandas.util._decorators import cache_readonly
+from pandas.util._decorators import (cache_readonly, deprecate_kwarg)
from pandas.core.dtypes.common import (
is_integer_dtype, is_float_dtype, is_period_dtype)
@@ -319,20 +319,32 @@ def _add_delta(self, other):
ordinal_delta = self._maybe_convert_timedelta(other)
return self._time_shift(ordinal_delta)
- def shift(self, n):
+ @deprecate_kwarg(old_arg_name='n', new_arg_name='periods')
+ def shift(self, periods):
"""
- Specialized shift which produces an Period Array/Index
+ Shift index by desired number of increments.
+
+ This method is for shifting the values of period indexes
+ by a specified time increment.
Parameters
----------
- n : int
- Periods to shift by
+ periods : int
+ Number of periods (or increments) to shift by,
+ can be positive or negative.
+
+ .. versionchanged:: 0.24.0
Returns
-------
- shifted : Period Array/Index
+ pandas.PeriodIndex
+ Shifted index.
+
+ See Also
+ --------
+ DatetimeIndex.shift : Shift values of DatetimeIndex.
"""
- return self._time_shift(n)
+ return self._time_shift(periods)
def _time_shift(self, n):
values = self._ndarray_values + n * self.freq.n
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 8de52fbfa79f0..0b5a10283946c 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8304,6 +8304,7 @@ def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None,
--------
Index.shift : Shift values of Index.
DatetimeIndex.shift : Shift values of DatetimeIndex.
+ PeriodIndex.shift : Shift values of PeriodIndex.
Notes
-----
diff --git a/pandas/tests/indexes/period/test_arithmetic.py b/pandas/tests/indexes/period/test_arithmetic.py
index 3380a1ebc58dc..d9cbb3ea27d7b 100644
--- a/pandas/tests/indexes/period/test_arithmetic.py
+++ b/pandas/tests/indexes/period/test_arithmetic.py
@@ -97,3 +97,12 @@ def test_shift_gh8083(self):
expected = PeriodIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05', '2013-01-06'], freq='D')
tm.assert_index_equal(result, expected)
+
+ def test_shift_periods(self):
+ # GH #22458 : argument 'n' was deprecated in favor of 'periods'
+ idx = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
+ tm.assert_index_equal(idx.shift(periods=0), idx)
+ tm.assert_index_equal(idx.shift(0), idx)
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=True):
+ tm.assert_index_equal(idx.shift(n=0), idx)
| - [x] closes #22458
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
In order to be consistent with `Index.shift` & `Series.shift` & `DatetimeIndex.shift`, `n` argument was deprecated in favor of `periods`.
```
In [2]: idx = pd.PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
In [3]: idx
Out[3]:
PeriodIndex(['2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008',
'2009'],
dtype='period[A-DEC]', freq='A-DEC')
In [4]: idx.shift(1)
Out[4]:
PeriodIndex(['2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009',
'2010'],
dtype='period[A-DEC]', freq='A-DEC')
In [5]: idx.shift(periods=1)
Out[5]:
PeriodIndex(['2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009',
'2010'],
dtype='period[A-DEC]', freq='A-DEC')
In [6]: idx.shift(n=1)
//anaconda/envs/pandas_dev/bin/ipython:1: FutureWarning: the 'n' keyword is deprecated, use 'periods' instead
#!//anaconda/envs/pandas_dev/bin/python
Out[6]:
PeriodIndex(['2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009',
'2010'],
dtype='period[A-DEC]', freq='A-DEC')
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/22912 | 2018-09-30T18:54:54Z | 2018-10-08T19:13:20Z | 2018-10-08T19:13:20Z | 2018-10-08T20:27:15Z |
DEPR/CLN: Clean up to_dense signature deprecations | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 16f0b9ee99909..83704987abdb7 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -440,7 +440,7 @@ In addition to these API breaking changes, many :ref:`performance improvements a
Raise ValueError in ``DataFrame.to_dict(orient='index')``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Bug in :func:`DataFrame.to_dict` raises ``ValueError`` when used with
+Bug in :func:`DataFrame.to_dict` raises ``ValueError`` when used with
``orient='index'`` and a non-unique index instead of losing data (:issue:`22801`)
.. ipython:: python
@@ -448,7 +448,7 @@ Bug in :func:`DataFrame.to_dict` raises ``ValueError`` when used with
df = pd.DataFrame({'a': [1, 2], 'b': [0.5, 0.75]}, index=['A', 'A'])
df
-
+
df.to_dict(orient='index')
.. _whatsnew_0240.api.datetimelike.normalize:
@@ -747,6 +747,8 @@ Removal of prior version deprecations/changes
- :meth:`Categorical.searchsorted` and :meth:`Series.searchsorted` have renamed the ``v`` argument to ``value`` (:issue:`14645`)
- :meth:`TimedeltaIndex.searchsorted`, :meth:`DatetimeIndex.searchsorted`, and :meth:`PeriodIndex.searchsorted` have renamed the ``key`` argument to ``value`` (:issue:`14645`)
- Removal of the previously deprecated module ``pandas.json`` (:issue:`19944`)
+- :meth:`SparseArray.get_values` and :meth:`SparseArray.to_dense` have dropped the ``fill`` parameter (:issue:`14686`)
+- :meth:`SparseSeries.to_dense` has dropped the ``sparse_only`` parameter (:issue:`14686`)
.. _whatsnew_0240.performance:
diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py
index f5e54e4425444..8588d377b6516 100644
--- a/pandas/core/arrays/sparse.py
+++ b/pandas/core/arrays/sparse.py
@@ -1249,31 +1249,19 @@ def map(self, mapper):
return type(self)(sp_values, sparse_index=self.sp_index,
fill_value=fill_value)
- def get_values(self, fill=None):
- """ return a dense representation """
- # TODO: deprecate for to_dense?
- return self.to_dense(fill=fill)
-
- def to_dense(self, fill=None):
+ def to_dense(self):
"""
Convert SparseArray to a NumPy array.
- Parameters
- ----------
- fill: float, default None
- .. deprecated:: 0.20.0
- This argument is not respected by this function.
-
Returns
-------
arr : NumPy array
"""
- if fill is not None:
- warnings.warn(("The 'fill' parameter has been deprecated and "
- "will be removed in a future version."),
- FutureWarning, stacklevel=2)
return np.asarray(self, dtype=self.sp_values.dtype)
+ # TODO: Look into deprecating this in favor of `to_dense`.
+ get_values = to_dense
+
# ------------------------------------------------------------------------
# IO
# ------------------------------------------------------------------------
diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py
index 35ddd623878d0..5a747c6e4b1d1 100644
--- a/pandas/core/sparse/series.py
+++ b/pandas/core/sparse/series.py
@@ -439,33 +439,16 @@ def _set_values(self, key, value):
kind=self.kind)
self._data = SingleBlockManager(values, self.index)
- def to_dense(self, sparse_only=False):
+ def to_dense(self):
"""
Convert SparseSeries to a Series.
- Parameters
- ----------
- sparse_only : bool, default False
- .. deprecated:: 0.20.0
- This argument will be removed in a future version.
-
- If True, return just the non-sparse values, or the dense version
- of `self.values` if False.
-
Returns
-------
s : Series
"""
- if sparse_only:
- warnings.warn(("The 'sparse_only' parameter has been deprecated "
- "and will be removed in a future version."),
- FutureWarning, stacklevel=2)
- int_index = self.sp_index.to_int_index()
- index = self.index.take(int_index.indices)
- return Series(self.sp_values, index=index, name=self.name)
- else:
- return Series(self.values.to_dense(), index=self.index,
- name=self.name)
+ return Series(self.values.to_dense(), index=self.index,
+ name=self.name)
@property
def density(self):
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py
index 0257d996228df..440dbc3de83bb 100644
--- a/pandas/tests/arrays/sparse/test_array.py
+++ b/pandas/tests/arrays/sparse/test_array.py
@@ -533,33 +533,21 @@ def test_shape(self, data, shape, dtype):
out = SparseArray(data, dtype=dtype)
assert out.shape == shape
- def test_to_dense(self):
- vals = np.array([1, np.nan, np.nan, 3, np.nan])
- res = SparseArray(vals).to_dense()
- tm.assert_numpy_array_equal(res, vals)
-
- res = SparseArray(vals, fill_value=0).to_dense()
- tm.assert_numpy_array_equal(res, vals)
-
- vals = np.array([1, np.nan, 0, 3, 0])
- res = SparseArray(vals).to_dense()
- tm.assert_numpy_array_equal(res, vals)
-
- res = SparseArray(vals, fill_value=0).to_dense()
- tm.assert_numpy_array_equal(res, vals)
-
- vals = np.array([np.nan, np.nan, np.nan, np.nan, np.nan])
- res = SparseArray(vals).to_dense()
- tm.assert_numpy_array_equal(res, vals)
-
- res = SparseArray(vals, fill_value=0).to_dense()
+ @pytest.mark.parametrize("vals", [
+ [np.nan, np.nan, np.nan, np.nan, np.nan],
+ [1, np.nan, np.nan, 3, np.nan],
+ [1, np.nan, 0, 3, 0],
+ ])
+ @pytest.mark.parametrize("method", ["to_dense", "get_values"])
+ @pytest.mark.parametrize("fill_value", [None, 0])
+ def test_dense_repr(self, vals, fill_value, method):
+ vals = np.array(vals)
+ arr = SparseArray(vals, fill_value=fill_value)
+ dense_func = getattr(arr, method)
+
+ res = dense_func()
tm.assert_numpy_array_equal(res, vals)
- # see gh-14647
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- SparseArray(vals).to_dense(fill=2)
-
def test_getitem(self):
def _checkit(i):
assert_almost_equal(self.arr[i], self.arr.values[i])
diff --git a/pandas/tests/sparse/series/test_series.py b/pandas/tests/sparse/series/test_series.py
index 22fca1e6b05e8..6a5821519866e 100644
--- a/pandas/tests/sparse/series/test_series.py
+++ b/pandas/tests/sparse/series/test_series.py
@@ -192,15 +192,6 @@ def test_sparse_to_dense(self):
series = self.bseries.to_dense()
tm.assert_series_equal(series, Series(arr, name='bseries'))
- # see gh-14647
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- series = self.bseries.to_dense(sparse_only=True)
-
- indexer = np.isfinite(arr)
- exp = Series(arr[indexer], index=index[indexer], name='bseries')
- tm.assert_series_equal(series, exp)
-
series = self.iseries.to_dense()
tm.assert_series_equal(series, Series(arr, name='iseries'))
| Also deprecate `fill` in `.get_values` because its parameter was being passed to `.to_dense`, which
no longer accepts the `fill` parameter.
xref #14686.
| https://api.github.com/repos/pandas-dev/pandas/pulls/22910 | 2018-09-30T18:43:56Z | 2018-10-18T17:30:04Z | 2018-10-18T17:30:04Z | 2018-10-18T18:53:01Z |
TST: solve test failure on 32bit systems | diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 99a909849822b..a753e925b0ed8 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1379,7 +1379,7 @@ def test_get_indexer_with_NA_values(self, unique_nulls_fixture,
index = pd.Index(arr, dtype=np.object)
result = index.get_indexer([unique_nulls_fixture,
unique_nulls_fixture2, 'Unknown'])
- expected = np.array([0, 1, -1], dtype=np.int64)
+ expected = np.array([0, 1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("method", [None, 'pad', 'backfill', 'nearest'])
| - [x] closes #22813
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
#22296 introduced a test, where ``dtype=np.int64`` was used, but ``np.intp`` is needed for the test to pass on 32bit systems also. This fixes that.
After this PR, the pandas test suite passes with no failures on my 32bit python installation on Windows 10.
| https://api.github.com/repos/pandas-dev/pandas/pulls/22908 | 2018-09-30T16:03:58Z | 2018-10-11T11:21:39Z | 2018-10-11T11:21:39Z | 2018-10-27T08:15:48Z |
STYLE: Added explicit exceptions | diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index c1a9a9fc1ed13..713a5b1120beb 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -33,7 +33,7 @@ def load_reduce(self):
cls = args[0]
stack[-1] = object.__new__(cls)
return
- except:
+ except TypeError:
pass
# try to re-encode the arguments
@@ -44,7 +44,7 @@ def load_reduce(self):
try:
stack[-1] = func(*args)
return
- except:
+ except TypeError:
pass
# unknown exception, re-raise
@@ -182,7 +182,7 @@ def load_newobj_ex(self):
try:
Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex
-except:
+except (AttributeError, KeyError):
pass
@@ -210,5 +210,5 @@ def load(fh, encoding=None, compat=False, is_verbose=False):
up.is_verbose = is_verbose
return up.load()
- except:
+ except (ValueError, TypeError):
raise
diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py
index b9c89c4e314f9..0497a827e2e1b 100644
--- a/pandas/tseries/holiday.py
+++ b/pandas/tseries/holiday.py
@@ -294,7 +294,7 @@ def _apply_rule(self, dates):
def register(cls):
try:
name = cls.name
- except:
+ except AttributeError:
name = cls.__name__
holiday_calendars[name] = cls
@@ -426,7 +426,7 @@ def merge_class(base, other):
"""
try:
other = other.rules
- except:
+ except AttributeError:
pass
if not isinstance(other, list):
@@ -435,7 +435,7 @@ def merge_class(base, other):
try:
base = base.rules
- except:
+ except AttributeError:
pass
if not isinstance(base, list):
diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py
index 5600834f3b615..03fc82a3acef5 100644
--- a/pandas/util/_print_versions.py
+++ b/pandas/util/_print_versions.py
@@ -21,7 +21,7 @@ def get_sys_info():
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
so, serr = pipe.communicate()
- except:
+ except (OSError, ValueError):
pass
else:
if pipe.returncode == 0:
@@ -50,7 +50,7 @@ def get_sys_info():
("LANG", "{lang}".format(lang=os.environ.get('LANG', "None"))),
("LOCALE", '.'.join(map(str, locale.getlocale()))),
])
- except:
+ except (KeyError, ValueError):
pass
return blob
@@ -108,7 +108,7 @@ def show_versions(as_json=False):
mod = importlib.import_module(modname)
ver = ver_f(mod)
deps_blob.append((modname, ver))
- except:
+ except ImportError:
deps_blob.append((modname, None))
if (as_json):
diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py
index a96563051e7de..e51e0c88e5b95 100644
--- a/pandas/util/_validators.py
+++ b/pandas/util/_validators.py
@@ -59,7 +59,7 @@ def _check_for_default_values(fname, arg_val_dict, compat_args):
# could not compare them directly, so try comparison
# using the 'is' operator
- except:
+ except ValueError:
match = (arg_val_dict[key] is compat_args[key])
if not match:
| - [x] xref #22877
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Fixed the following bare excepts:
> pandas/compat/pickle_compat.py:36:13: E722 do not use bare except'
> pandas/compat/pickle_compat.py:47:13: E722 do not use bare except'
> pandas/compat/pickle_compat.py:185:1: E722 do not use bare except'
> pandas/compat/pickle_compat.py:213:5: E722 do not use bare except'
> pandas/tseries/holiday.py:297:5: E722 do not use bare except'
> pandas/tseries/holiday.py:429:9: E722 do not use bare except'
> pandas/tseries/holiday.py:438:9: E722 do not use bare except'
> pandas/util/_print_versions.py:24:9: E722 do not use bare except'
> pandas/util/_print_versions.py:53:5: E722 do not use bare except'
> pandas/util/_print_versions.py:111:9: E722 do not use bare except'
> pandas/util/_validators.py:62:9: E722 do not use bare except'
| https://api.github.com/repos/pandas-dev/pandas/pulls/22907 | 2018-09-30T08:53:53Z | 2018-10-01T11:52:56Z | 2018-10-01T11:52:56Z | 2018-10-01T14:57:19Z |
DOC: Fix quantile docstring | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b4e8b4e3a6bec..b19391afc7c69 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7390,20 +7390,26 @@ def f(s):
def quantile(self, q=0.5, axis=0, numeric_only=True,
interpolation='linear'):
"""
- Return values at the given quantile over requested axis.
+ Return value(s) at the given quantile over requested axis.
+
+ This function returns the Series of 'q' quantile value(s)
+ from the DataFrame, dividing data points into groups
+ along `axis` axis
+ In case of insufficient number of data points for clean division
+ into groups, specify `interpolation` scheme to implement.
Parameters
----------
- q : float or array-like, default 0.5 (50% quantile)
- 0 <= q <= 1, the quantile(s) to compute
- axis : {0, 1, 'index', 'columns'} (default 0)
- 0 or 'index' for row-wise, 1 or 'columns' for column-wise
- numeric_only : boolean, default True
+ q : float or array-like, default 0.5
+ The quantile(s) to compute,
+ should be a float between 0 and 1 (inclusive),
+ 0.5 is equivalent to calculate 50% quantile value ie the median.
+ axis : {0 or 'index', 1 or 'columns'}, default 0
+ For row-wise : 0 or'index', for column-wise : 1 or 'columns'.
+ numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be
- computed as well
+ computed as well.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
- .. versionadded:: 0.18.0
-
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
@@ -7416,46 +7422,58 @@ def quantile(self, q=0.5, axis=0, numeric_only=True,
Returns
-------
- quantiles : Series or DataFrame
-
- - If ``q`` is an array, a DataFrame will be returned where the
- index is ``q``, the columns are the columns of self, and the
- values are the quantiles.
- - If ``q`` is a float, a Series will be returned where the
- index is the columns of self and the values are the quantiles.
-
- Examples
- --------
-
- >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
- columns=['a', 'b'])
- >>> df.quantile(.1)
- a 1.3
- b 3.7
- dtype: float64
- >>> df.quantile([.1, .5])
- a b
- 0.1 1.3 3.7
- 0.5 2.5 55.0
-
- Specifying `numeric_only=False` will also compute the quantile of
- datetime and timedelta data.
-
- >>> df = pd.DataFrame({'A': [1, 2],
- 'B': [pd.Timestamp('2010'),
- pd.Timestamp('2011')],
- 'C': [pd.Timedelta('1 days'),
- pd.Timedelta('2 days')]})
- >>> df.quantile(0.5, numeric_only=False)
- A 1.5
- B 2010-07-02 12:00:00
- C 1 days 12:00:00
- Name: 0.5, dtype: object
+ Series
See Also
--------
pandas.core.window.Rolling.quantile
+ Returns the rolling quantile for the DataFrame.
numpy.percentile
+ Returns 'nth' percentile for numpy arrays.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> d = {'animal':['Cheetah','Falcon','Eagle','Goose','Pigeon'],
+ ... 'class':['mammal','bird','bird','bird','bird'],
+ ... 'max_speed':[120,np.nan,320,142,150]}
+ >>> df = pd.DataFrame(data=d)
+ >>> df
+ animal class max_speed
+ 0 Cheetah mammal 120.0
+ 1 Falcon bird NaN
+ 2 Eagle bird 320.0
+ 3 Goose bird 142.0
+ 4 Pigeon bird 150.0
+
+ The `max_speed` in sorted order:-
+
+ >>> df['max_speed'].sort_values(ascending=False)
+ 2 320.0
+ 4 150.0
+ 3 142.0
+ 0 120.0
+ 1 NaN
+ Name: max_speed, dtype: float64
+
+ >>> df.quantile()
+ max_speed 146.0
+ Name: 0.5, dtype: float64
+
+ The above was calculated by interpolating between available values.
+
+ >>> df.quantile(q=[0.05,0.95])
+ max_speed
+ 0.05 123.3
+ 0.95 294.5
+ >>> df.quantile(q=[0.05,0.95],interpolation="higher")
+ max_speed
+ 0.05 142.0
+ 0.95 320.0
+ >>> df.quantile(q=[0.05,0.95],interpolation="lower")
+ max_speed
+ 0.05 120.0
+ 0.95 150.0
"""
self._check_percentile(q)
| …tput of example 1 Added summary for **See Also** functions Some typo fixes .
- [x] closes #22898
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22906 | 2018-09-30T07:49:27Z | 2018-12-07T12:46:19Z | null | 2018-12-07T12:56:36Z |
DOC GH22897 Fix docstring of join in pandas/core/frame.py | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b4e8b4e3a6bec..a5add27ed30d8 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -6440,123 +6440,122 @@ def append(self, other, ignore_index=False,
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
"""
- Join columns with other DataFrame either on index or on a key
- column. Efficiently Join multiple DataFrame objects by index at once by
+ Join columns of another DataFrame.
+
+ Join columns with `other` DataFrame either on index or on a key
+ column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
- other : DataFrame, Series with name field set, or list of DataFrame
+ other : DataFrame, Series, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
- used as the column name in the resulting joined DataFrame
- on : name, tuple/list of names, or array-like
+ used as the column name in the resulting joined DataFrame.
+ on : str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
- the calling DataFrame. Like an Excel VLOOKUP operation
- how : {'left', 'right', 'outer', 'inner'}, default: 'left'
+ the calling DataFrame. Like an Excel VLOOKUP operation.
+ how : {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
- * right: use other frame's index
+ * right: use `other`'s index.
* outer: form union of calling frame's index (or column if on is
- specified) with other frame's index, and sort it
- lexicographically
+ specified) with `other`'s index, and sort it.
+ lexicographically.
* inner: form intersection of calling frame's index (or column if
- on is specified) with other frame's index, preserving the order
- of the calling's one
- lsuffix : string
- Suffix to use from left frame's overlapping columns
- rsuffix : string
- Suffix to use from right frame's overlapping columns
- sort : boolean, default False
+ on is specified) with `other`'s index, preserving the order
+ of the calling's one.
+ lsuffix : str, default ''
+ Suffix to use from left frame's overlapping columns.
+ rsuffix : str, default ''
+ Suffix to use from right frame's overlapping columns.
+ sort : bool, default False
Order result DataFrame lexicographically by the join key. If False,
- the order of the join key depends on the join type (how keyword)
+ the order of the join key depends on the join type (how keyword).
+
+ Returns
+ -------
+ DataFrame
+ A dataframe containing columns from both the caller and `other`.
Notes
-----
- on, lsuffix, and rsuffix options are not supported when passing a list
- of DataFrame objects
+ Options `on`, `lsuffix`, and `rsuffix` options are not supported
+ when passing a list of DataFrame objects.
Support for specifying index levels as the `on` parameter was added
- in version 0.23.0
+ in version 0.23.0.
+
+ See Also
+ --------
+ DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
- >>> caller = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
+
+ >>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
- >>> caller
- A key
- 0 A0 K0
- 1 A1 K1
- 2 A2 K2
- 3 A3 K3
- 4 A4 K4
- 5 A5 K5
+ >>> df
+ key A
+ 0 K0 A0
+ 1 K1 A1
+ 2 K2 A2
+ 3 K3 A3
+ 4 K4 A4
+ 5 K5 A5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
- B key
- 0 B0 K0
- 1 B1 K1
- 2 B2 K2
+ key B
+ 0 K0 B0
+ 1 K1 B1
+ 2 K2 B2
Join DataFrames using their indexes.
- >>> caller.join(other, lsuffix='_caller', rsuffix='_other')
-
- >>> A key_caller B key_other
- 0 A0 K0 B0 K0
- 1 A1 K1 B1 K1
- 2 A2 K2 B2 K2
- 3 A3 K3 NaN NaN
- 4 A4 K4 NaN NaN
- 5 A5 K5 NaN NaN
-
+ >>> df.join(other, lsuffix='_caller', rsuffix='_other')
+ key_caller A key_other B
+ 0 K0 A0 K0 B0
+ 1 K1 A1 K1 B1
+ 2 K2 A2 K2 B2
+ 3 K3 A3 NaN NaN
+ 4 K4 A4 NaN NaN
+ 5 K5 A5 NaN NaN
If we want to join using the key columns, we need to set key to be
- the index in both caller and other. The joined DataFrame will have
+ the index in both `df` and `other`. The joined DataFrame will have
key as its index.
- >>> caller.set_index('key').join(other.set_index('key'))
-
- >>> A B
- key
- K0 A0 B0
- K1 A1 B1
- K2 A2 B2
- K3 A3 NaN
- K4 A4 NaN
- K5 A5 NaN
-
- Another option to join using the key columns is to use the on
- parameter. DataFrame.join always uses other's index but we can use any
- column in the caller. This method preserves the original caller's
+ >>> df.set_index('key').join(other.set_index('key'))
+ A B
+ key
+ K0 A0 B0
+ K1 A1 B1
+ K2 A2 B2
+ K3 A3 NaN
+ K4 A4 NaN
+ K5 A5 NaN
+
+ Another option to join using the key columns is to use the `on`
+ parameter. DataFrame.join always uses `other`'s index but we can use
+ any column in `df`. This method preserves the original DataFrame's
index in the result.
- >>> caller.join(other.set_index('key'), on='key')
-
- >>> A key B
- 0 A0 K0 B0
- 1 A1 K1 B1
- 2 A2 K2 B2
- 3 A3 K3 NaN
- 4 A4 K4 NaN
- 5 A5 K5 NaN
-
-
- See also
- --------
- DataFrame.merge : For column(s)-on-columns(s) operations
-
- Returns
- -------
- joined : DataFrame
+ >>> df.join(other.set_index('key'), on='key')
+ key A B
+ 0 K0 A0 B0
+ 1 K1 A1 B1
+ 2 K2 A2 B2
+ 3 K3 A3 NaN
+ 4 K4 A4 NaN
+ 5 K5 A5 NaN
"""
# For SparseDataFrame's benefit
return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
| - [X] closes #22897
- [X] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
#22900 was dealt with by adding an "import pandas as pd" line to the start of the examples section. | https://api.github.com/repos/pandas-dev/pandas/pulls/22904 | 2018-09-30T07:22:17Z | 2018-11-03T07:12:49Z | null | 2018-11-03T07:12:49Z |
DOC: #22899, Fixed docstring of itertuples in pandas/core/frame.py | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index d5b273f37a3a2..8c6b216ad196e 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -883,43 +883,66 @@ def iterrows(self):
def itertuples(self, index=True, name="Pandas"):
"""
- Iterate over DataFrame rows as namedtuples, with index value as first
- element of the tuple.
+ Iterate over DataFrame rows as namedtuples.
Parameters
----------
- index : boolean, default True
+ index : bool, default True
If True, return the index as the first element of the tuple.
- name : string, default "Pandas"
+ name : str, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
+ Yields
+ -------
+ collections.namedtuple
+ Yields a namedtuple for each row in the DataFrame with the first
+ field possibly being the index and following fields being the
+ column values.
+
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
- See also
+ See Also
--------
- iterrows : Iterate over DataFrame rows as (index, Series) pairs.
- iteritems : Iterate over (column name, Series) pairs.
+ DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
+ pairs.
+ DataFrame.iteritems : Iterate over (column name, Series) pairs.
Examples
--------
-
- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2]},
- index=['a', 'b'])
+ >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
+ ... index=['dog', 'hawk'])
>>> df
- col1 col2
- a 1 0.1
- b 2 0.2
+ num_legs num_wings
+ dog 4 0
+ hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
- Pandas(Index='a', col1=1, col2=0.10000000000000001)
- Pandas(Index='b', col1=2, col2=0.20000000000000001)
+ Pandas(Index='dog', num_legs=4, num_wings=0)
+ Pandas(Index='hawk', num_legs=2, num_wings=2)
+
+ By setting the `index` parameter to False we can remove the index
+ as the first element of the tuple:
+
+ >>> for row in df.itertuples(index=False):
+ ... print(row)
+ ...
+ Pandas(num_legs=4, num_wings=0)
+ Pandas(num_legs=2, num_wings=2)
+
+ With the `name` parameter set we set a custom name for the yielded
+ namedtuples:
+ >>> for row in df.itertuples(name='Animal'):
+ ... print(row)
+ ...
+ Animal(Index='dog', num_legs=4, num_wings=0)
+ Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = []
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
#22899
I went ahead and followed the docstring conventions mentioned here:
https://pandas.pydata.org/pandas-docs/stable/contributing_docstring.html
The fixes I made were for the following errors:
<img width="891" alt="screen shot 2018-09-29 at 19 17 59" src="https://user-images.githubusercontent.com/16315857/46252338-740df400-c41c-11e8-9c29-ff955c9b9705.png">
<img width="618" alt="screen shot 2018-09-29 at 19 18 08" src="https://user-images.githubusercontent.com/16315857/46252339-76704e00-c41c-11e8-89d5-adab168dc329.png">
- Fixed whitespace between closing line and end of docstring
- Converted summary into single line
- Added `Returns` section (Not sure if done correctly!)
- Fixed examples to pass test by adding `...` at line 914
- Fixed namedtuple return values at 922 and 923
Please let me know if there are any issues and I will try my best to fix them ASAP.
Thank you!~ | https://api.github.com/repos/pandas-dev/pandas/pulls/22902 | 2018-09-30T02:21:18Z | 2018-10-07T22:40:48Z | 2018-10-07T22:40:48Z | 2018-10-07T22:40:51Z |
CLN GH22873 Replace base excepts in pandas/core | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 851c1a3fbd6e9..f83185173c3e3 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -834,4 +834,3 @@ Other
- :meth:`DataFrame.nlargest` and :meth:`DataFrame.nsmallest` now returns the correct n values when keep != 'all' also when tied on the first columns (:issue:`22752`)
- :meth:`~pandas.io.formats.style.Styler.bar` now also supports tablewise application (in addition to rowwise and columnwise) with ``axis=None`` and setting clipping range with ``vmin`` and ``vmax`` (:issue:`21548` and :issue:`21526`). ``NaN`` values are also handled properly.
- Logical operations ``&, |, ^`` between :class:`Series` and :class:`Index` will no longer raise ``ValueError`` (:issue:`22092`)
--
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index 2bd1b0c5b3507..e08df3e340138 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -411,7 +411,7 @@ def visit_Subscript(self, node, **kwargs):
slobj = self.visit(node.slice)
try:
value = value.value
- except:
+ except AttributeError:
pass
try:
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index e2b9e246aee50..5f0b71d4505c2 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -467,7 +467,7 @@ def is_timedelta64_dtype(arr_or_dtype):
return False
try:
tipo = _get_dtype_type(arr_or_dtype)
- except:
+ except (TypeError, ValueError, SyntaxError):
return False
return issubclass(tipo, np.timedelta64)
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index d879ded4f0f09..fe5cc9389a8ba 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -358,11 +358,11 @@ def construct_from_string(cls, string):
try:
if string == 'category':
return cls()
- except:
+ else:
+ raise TypeError("cannot construct a CategoricalDtype")
+ except AttributeError:
pass
- raise TypeError("cannot construct a CategoricalDtype")
-
@staticmethod
def validate_ordered(ordered):
"""
@@ -519,7 +519,7 @@ def __new__(cls, unit=None, tz=None):
if m is not None:
unit = m.groupdict()['unit']
tz = m.groupdict()['tz']
- except:
+ except TypeError:
raise ValueError("could not construct DatetimeTZDtype")
elif isinstance(unit, compat.string_types):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b4e8b4e3a6bec..dfd0b1624f9d6 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3260,7 +3260,7 @@ def _ensure_valid_index(self, value):
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
- except:
+ except (ValueError, NotImplementedError, TypeError):
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
@@ -7747,7 +7747,7 @@ def convert(v):
values = np.array([convert(v) for v in values])
else:
values = convert(values)
- except:
+ except (ValueError, TypeError):
values = convert(values)
else:
diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py
index 5a37e03b700f9..289970aaf3a82 100644
--- a/pandas/core/indexes/frozen.py
+++ b/pandas/core/indexes/frozen.py
@@ -139,7 +139,7 @@ def searchsorted(self, value, side="left", sorter=None):
# xref: https://github.com/numpy/numpy/issues/5370
try:
value = self.dtype.type(value)
- except:
+ except ValueError:
pass
return super(FrozenNDArray, self).searchsorted(
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 3e6b934e1e863..119a607fc0e68 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -6,6 +6,7 @@
import numpy as np
from pandas._libs import algos as libalgos, index as libindex, lib, Timestamp
+from pandas._libs import tslibs
from pandas.compat import range, zip, lrange, lzip, map
from pandas.compat.numpy import function as nv
@@ -1002,12 +1003,13 @@ def _try_mi(k):
return _try_mi(key)
except (KeyError):
raise
- except:
+ except (IndexError, ValueError, TypeError):
pass
try:
return _try_mi(Timestamp(key))
- except:
+ except (KeyError, TypeError,
+ IndexError, ValueError, tslibs.OutOfBoundsDatetime):
pass
raise InvalidIndexError(key)
@@ -1686,7 +1688,7 @@ def append(self, other):
# if all(isinstance(x, MultiIndex) for x in other):
try:
return MultiIndex.from_tuples(new_tuples, names=self.names)
- except:
+ except (TypeError, IndexError):
return Index(new_tuples)
def argsort(self, *args, **kwargs):
@@ -2315,7 +2317,7 @@ def maybe_droplevels(indexer, levels, drop_level):
for i in sorted(levels, reverse=True):
try:
new_index = new_index.droplevel(i)
- except:
+ except ValueError:
# no dropping here
return orig_index
@@ -2818,7 +2820,7 @@ def _convert_can_do_setop(self, other):
msg = 'other must be a MultiIndex or a list of tuples'
try:
other = MultiIndex.from_tuples(other)
- except:
+ except TypeError:
raise TypeError(msg)
else:
result_names = self.names if self.names == other.names else None
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index b63f874abff85..150518aadcfd9 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -2146,7 +2146,7 @@ def _getitem_tuple(self, tup):
self._has_valid_tuple(tup)
try:
return self._getitem_lowerdim(tup)
- except:
+ except IndexingError:
pass
retval = self.obj
@@ -2705,13 +2705,13 @@ def maybe_droplevels(index, key):
for _ in key:
try:
index = index.droplevel(0)
- except:
+ except ValueError:
# we have dropped too much, so back out
return original_index
else:
try:
index = index.droplevel(0)
- except:
+ except ValueError:
pass
return index
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 6576db9f642a6..0e57dd33b1c4e 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -666,7 +666,7 @@ def _astype(self, dtype, copy=False, errors='raise', values=None,
newb = make_block(values, placement=self.mgr_locs,
klass=klass, ndim=self.ndim)
- except:
+ except Exception: # noqa: E722
if errors == 'raise':
raise
newb = self.copy() if copy else self
@@ -1142,7 +1142,7 @@ def check_int_bool(self, inplace):
# a fill na type method
try:
m = missing.clean_fill_method(method)
- except:
+ except ValueError:
m = None
if m is not None:
@@ -1157,7 +1157,7 @@ def check_int_bool(self, inplace):
# try an interp method
try:
m = missing.clean_interp_method(method, **kwargs)
- except:
+ except ValueError:
m = None
if m is not None:
@@ -2438,7 +2438,7 @@ def set(self, locs, values, check=False):
try:
if (self.values[locs] == values).all():
return
- except:
+ except (IndexError, ValueError):
pass
try:
self.values[locs] = values
@@ -3172,7 +3172,7 @@ def _astype(self, dtype, copy=False, errors='raise', values=None,
def __len__(self):
try:
return self.sp_index.length
- except:
+ except AttributeError:
return 0
def copy(self, deep=True, mgr=None):
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 7619d47cbc8f9..232d030da7f1e 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -503,7 +503,8 @@ def reduction(values, axis=None, skipna=True):
try:
result = getattr(values, meth)(axis, dtype=dtype_max)
result.fill(np.nan)
- except:
+ except (AttributeError, TypeError,
+ ValueError, np.core._internal.AxisError):
result = np.nan
else:
result = getattr(values, meth)(axis)
@@ -815,7 +816,7 @@ def _ensure_numeric(x):
elif is_object_dtype(x):
try:
x = x.astype(np.complex128)
- except:
+ except (TypeError, ValueError):
x = x.astype(np.float64)
else:
if not np.any(x.imag):
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 70fe7de0a973e..ad187b08e0742 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -1545,7 +1545,8 @@ def na_op(x, y):
y = bool(y)
try:
result = libops.scalar_binop(x, y, op)
- except:
+ except (TypeError, ValueError, AttributeError,
+ OverflowError, NotImplementedError):
raise TypeError("cannot compare a dtyped [{dtype}] array "
"with a scalar of type [{typ}]"
.format(dtype=x.dtype,
diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py
index eb07e5ef6c85f..186a2490a5f2e 100644
--- a/pandas/core/sparse/array.py
+++ b/pandas/core/sparse/array.py
@@ -306,7 +306,7 @@ def __setstate__(self, state):
def __len__(self):
try:
return self.sp_index.length
- except:
+ except AttributeError:
return 0
def __unicode__(self):
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 4a5290a90313d..eb8d2b0b6c809 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -244,7 +244,7 @@ def _convert_listlike_datetimes(arg, box, format, name=None, tz=None,
if format == '%Y%m%d':
try:
result = _attempt_YYYYMMDD(arg, errors=errors)
- except:
+ except (ValueError, TypeError, tslibs.OutOfBoundsDatetime):
raise ValueError("cannot convert the input to "
"'%Y%m%d' date format")
@@ -334,7 +334,7 @@ def _adjust_to_origin(arg, origin, unit):
raise ValueError("unit must be 'D' for origin='julian'")
try:
arg = arg - j0
- except:
+ except TypeError:
raise ValueError("incompatible 'arg' type for given "
"'origin'='julian'")
@@ -731,21 +731,21 @@ def calc_with_mask(carg, mask):
# try intlike / strings that are ints
try:
return calc(arg.astype(np.int64))
- except:
+ except ValueError:
pass
# a float with actual np.nan
try:
carg = arg.astype(np.float64)
return calc_with_mask(carg, notna(carg))
- except:
+ except ValueError:
pass
# string with NaN-like
try:
mask = ~algorithms.isin(arg, list(tslib.nat_strings))
return calc_with_mask(arg, mask)
- except:
+ except ValueError:
pass
return None
diff --git a/pandas/core/window.py b/pandas/core/window.py
index 5cdf62d5a5537..4281d66a640e3 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -2504,7 +2504,7 @@ def _offset(window, center):
offset = (window - 1) / 2. if center else 0
try:
return int(offset)
- except:
+ except TypeError:
return offset.astype(int)
| This cleaning fix adds specific exception classes to bare except statements to correct PEP8 linting errors. The classes were chosen by a mixture of educated guessing (i.e. if a statement gets the `length` property, it might raise an `AttributeError`), trying out functions on known edge cases (i.e. giving a datetime input that is too large gives `tslibs.OutOfBoundsDatetime`), and inspecting functions in the `try` block to see what kind of errors they raise.
- [x] xref #22873
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/22901 | 2018-09-30T00:08:17Z | 2018-10-02T21:16:26Z | 2018-10-02T21:16:26Z | 2018-10-03T13:15:40Z |
DOC: updated the docstring of Series.dot | diff --git a/pandas/core/series.py b/pandas/core/series.py
index a613b22ea9046..141d738d0c3bf 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2080,16 +2080,53 @@ def autocorr(self, lag=1):
def dot(self, other):
"""
- Matrix multiplication with DataFrame or inner-product with Series
- objects. Can also be called using `self @ other` in Python >= 3.5.
+ Compute the dot product between the Series and the columns of other.
+
+ This method computes the dot product between the Series and another
+ one, or the Series and each columns of a DataFrame, or the Series and
+ each columns of an array.
+
+ It can also be called using `self @ other` in Python >= 3.5.
Parameters
----------
- other : Series or DataFrame
+ other : Series, DataFrame or array-like
+ The other object to compute the dot product with its columns.
Returns
-------
- dot_product : scalar or Series
+ scalar, Series or numpy.ndarray
+ Return the dot product of the Series and other if other is a
+ Series, the Series of the dot product of Series and each rows of
+ other if other is a DataFrame or a numpy.ndarray between the Series
+ and each columns of the numpy array.
+
+ See Also
+ --------
+ DataFrame.dot: Compute the matrix product with the DataFrame.
+ Series.mul: Multiplication of series and other, element-wise.
+
+ Notes
+ -----
+ The Series and other has to share the same index if other is a Series
+ or a DataFrame.
+
+ Examples
+ --------
+ >>> s = pd.Series([0, 1, 2, 3])
+ >>> other = pd.Series([-1, 2, -3, 4])
+ >>> s.dot(other)
+ 8
+ >>> s @ other
+ 8
+ >>> df = pd.DataFrame([[0 ,1], [-2, 3], [4, -5], [6, 7]])
+ >>> s.dot(df)
+ 0 24
+ 1 14
+ dtype: int64
+ >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]])
+ >>> s.dot(arr)
+ array([24, 14])
"""
from pandas.core.frame import DataFrame
if isinstance(other, (Series, DataFrame)):
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/22890 | 2018-09-29T18:54:54Z | 2018-11-03T07:04:42Z | 2018-11-03T07:04:42Z | 2018-11-03T07:04:52Z |
BLD: minor break ci/requirements-optional-pip.txt | diff --git a/ci/requirements-optional-pip.txt b/ci/requirements-optional-pip.txt
index 2e1bf0ca22bcf..09ce8e59a3b46 100644
--- a/ci/requirements-optional-pip.txt
+++ b/ci/requirements-optional-pip.txt
@@ -14,7 +14,7 @@ lxml
matplotlib
nbsphinx
numexpr
-openpyxl=2.5.5
+openpyxl==2.5.5
pyarrow
pymysql
tables
@@ -28,4 +28,4 @@ statsmodels
xarray
xlrd
xlsxwriter
-xlwt
+xlwt
\ No newline at end of file
diff --git a/scripts/convert_deps.py b/scripts/convert_deps.py
index aabeb24a0c3c8..3ff157e0a0d7b 100755
--- a/scripts/convert_deps.py
+++ b/scripts/convert_deps.py
@@ -1,6 +1,7 @@
"""
Convert the conda environment.yaml to a pip requirements.txt
"""
+import re
import yaml
exclude = {'python=3'}
@@ -15,6 +16,7 @@
required = dev['dependencies']
required = [rename.get(dep, dep) for dep in required if dep not in exclude]
optional = [rename.get(dep, dep) for dep in optional if dep not in exclude]
+optional = [re.sub("(?<=[^<>])=", '==', dep) for dep in optional]
with open("ci/requirements_dev.txt", 'wt') as f:
| - [x] closes #22595
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
When adding optional dependencies, below error appears from
> Invalid requirement: 'openpyxl=2.5.5'
> = is not a valid operator. Did you mean == ?
To core dev, should we fix the version or simply specify lower bound (i.e. >=)? | https://api.github.com/repos/pandas-dev/pandas/pulls/22889 | 2018-09-29T18:27:48Z | 2018-09-30T13:59:47Z | 2018-09-30T13:59:47Z | 2018-09-30T14:08:55Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.